diff --git a/.gitignore b/.gitignore index b4ec8795057..5d7dbbefdc8 100644 --- a/.gitignore +++ b/.gitignore @@ -38,11 +38,14 @@ dependency-reduced-pom.xml # osx stuff .DS_Store +# default folders in which create_bwc_index.py expects to find old es versions +/backwards +/dev-tools/backwards + # needed in case docs build is run...maybe we can configure doc build to generate files under build? html_docs # random old stuff that we should look at the necessity of... /tmp/ -backwards/ eclipse-build diff --git a/TESTING.asciidoc b/TESTING.asciidoc index dcd6c9981be..a1a01a8f231 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -25,12 +25,6 @@ run it using Gradle: gradle run ------------------------------------- -or to attach a remote debugger, run it as: - -------------------------------------- -gradle run --debug-jvm -------------------------------------- - === Test case filtering. - `tests.class` is a class-filtering shell-like glob pattern, @@ -430,15 +424,33 @@ cd $BATS_ARCHIVES sudo -E bats $BATS_TESTS/*.bats ------------------------------------------------- -Note: Starting vagrant VM outside of the elasticsearch folder requires to -indicates the folder that contains the Vagrantfile using the VAGRANT_CWD -environment variable: +You can also use Gradle to prepare the test environment and then start a single VM: ------------------------------------------------- -gradle vagrantSetUp -VAGRANT_CWD=/path/to/elasticsearch vagrant up centos-7 --provider virtualbox +gradle vagrantFedora24#up ------------------------------------------------- +Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up, +vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1204#up, +vagrantUbuntu1604#up. + +Once up, you can then connect to the VM using SSH from the elasticsearch directory: + +------------------------------------------------- +vagrant ssh fedora-24 +------------------------------------------------- + +Or from another directory: + +------------------------------------------------- +VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-24 +------------------------------------------------- + +Note: Starting a vagrant VM outside of the elasticsearch folder requires you to +indicate the folder that contains the Vagrantfile using the VAGRANT_CWD +environment variable. + + == Coverage analysis Tests can be run instrumented with jacoco to produce a coverage report in @@ -462,7 +474,7 @@ Combined (Unit+Integration) coverage: mvn -Dtests.coverage verify jacoco:report --------------------------------------------------------------------------- -== Debugging from an IDE +== Launching and debugging from an IDE If you want to run elasticsearch from your IDE, the `gradle run` task supports a remote debugging option: @@ -471,6 +483,17 @@ supports a remote debugging option: gradle run --debug-jvm --------------------------------------------------------------------------- +== Debugging remotely from an IDE + +If you want to run Elasticsearch and be able to remotely attach the process +for debugging purposes from your IDE, you can start Elasticsearch using `ES_JAVA_OPTS`: + +--------------------------------------------------------------------------- +ES_JAVA_OPTS="-Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=4000,suspend=y" ./bin/elasticsearch +--------------------------------------------------------------------------- + +Read your IDE documentation for how to attach a debugger to a JVM process.
+ == Building with extra plugins Additional plugins may be built alongside elasticsearch, where their dependency on elasticsearch will be substituted with the local elasticsearch diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 36732215d43..fe6d7b59eb3 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -55,7 +55,7 @@ dependencies { runtime 'org.apache.commons:commons-math3:3.2' } -compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" +compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked,-processing" // enable the JMH's BenchmarkProcessor to generate the final benchmark classes // needs to be added separately otherwise Gradle will quote it and javac will fail compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) diff --git a/build.gradle b/build.gradle index ba19a993bdc..fd97470ec6c 100644 --- a/build.gradle +++ b/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +import java.nio.file.Path import org.eclipse.jgit.lib.Repository import org.eclipse.jgit.lib.RepositoryBuilder import org.gradle.plugins.ide.eclipse.model.SourceFolder @@ -29,8 +30,9 @@ subprojects { description = "Elasticsearch subproject ${project.path}" } +Path rootPath = rootDir.toPath() // setup pom license info, but only for artifacts that are part of elasticsearch -configure(subprojects.findAll { it.path.startsWith(':x-plugins') == false }) { +configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) { // we only use maven publish to add tasks for pom generation plugins.withType(MavenPublishPlugin).whenPluginAdded { @@ -204,6 +206,14 @@ allprojects { } } } + + task cleanIdeaBuildDir(type: Delete) { + delete 'build-idea' + } + cleanIdeaBuildDir.setGroup("ide") + cleanIdeaBuildDir.setDescription("Deletes the IDEA build directory.") + + tasks.cleanIdea.dependsOn(cleanIdeaBuildDir) } idea { diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0e8c2dc1412..65c2c00dd13 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -23,14 +23,12 @@ apply plugin: 'groovy' group = 'org.elasticsearch.gradle' -// TODO: remove this when upgrading to a version that supports ProgressLogger -// gradle 2.14 made internal apis unavailable to plugins, and gradle considered -// ProgressLogger to be an internal api. Until this is made available again, -// we can't upgrade without losing our nice progress logging -// NOTE that this check duplicates that in BuildPlugin, but we need to check -// early here before trying to compile the broken classes in buildSrc -if (GradleVersion.current() != GradleVersion.version('2.13')) { - throw new GradleException('Gradle 2.13 is required to build elasticsearch') +if (GradleVersion.current() < GradleVersion.version('2.13')) { + throw new GradleException('Gradle 2.13+ is required to build elasticsearch') +} + +if (JavaVersion.current() < JavaVersion.VERSION_1_8) { + throw new GradleException('Java 1.8 is required to build elasticsearch gradle tools') } if (project == rootProject) { @@ -96,9 +94,26 @@ dependencies { compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... 
compile 'de.thetaphi:forbiddenapis:2.2' compile 'org.apache.rat:apache-rat:0.11' - compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' } +// Gradle version-specific options (allows build to run with Gradle 2.13 as well as 2.14+/3.+) +if (GradleVersion.current() == GradleVersion.version("2.13")) { + // ProgressLogger(-Factory) classes are part of the public Gradle API + sourceSets.main.groovy.srcDir 'src/main/gradle-2.13-groovy' + + dependencies { + compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' // last version compatible with Gradle 2.13 + } +} else { + // Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs + // Use logging dependency instead + sourceSets.main.groovy.srcDir 'src/main/gradle-2.14-groovy' + + dependencies { + compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}" + compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1 + } +} /***************************************************************************** * Bootstrap repositories * diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommandRegistry.java b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy similarity index 67% rename from core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommandRegistry.java rename to buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy index 27c1f074c40..5c02e255a1a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommandRegistry.java +++ b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. */ - -package org.elasticsearch.cluster.routing.allocation.command; - -import org.elasticsearch.common.xcontent.ParseFieldRegistry; +package org.elasticsearch.gradle /** - * Registry of allocation commands. This is it's own class just to make Guice happy. + * Wraps a ProgressLogger so that code in src/main/groovy does not need to + * define imports on Gradle 2.13/2.14+ ProgressLoggers */ -public class AllocationCommandRegistry extends ParseFieldRegistry> { - public AllocationCommandRegistry() { - super("allocation_command"); +class ProgressLogger { + @Delegate org.gradle.logging.ProgressLogger progressLogger + + ProgressLogger(org.gradle.logging.ProgressLogger progressLogger) { + this.progressLogger = progressLogger } } diff --git a/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy new file mode 100644 index 00000000000..290c4d581d6 --- /dev/null +++ b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle + +import org.gradle.logging.ProgressLoggerFactory + +import javax.inject.Inject + +/** + * Allows injecting a ProgressLoggerFactory into tasks in src/main/groovy + * without requiring the corresponding import of ProgressLoggerFactory, + * making it compatible with both Gradle 2.13 and 2.14+. + */ +trait ProgressLoggerFactoryInjection { + @Inject + ProgressLoggerFactory getProgressLoggerFactory() { + throw new UnsupportedOperationException() + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestParseException.java b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy similarity index 66% rename from test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestParseException.java rename to buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy index 594f701c79a..2c9fab78b43 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestParseException.java +++ b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy @@ -16,18 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.test.rest.yaml.parser; +package org.elasticsearch.gradle /** - * Exception thrown whenever there is a problem parsing any of the REST test suite fragment + * Wraps a ProgressLogger so that code in src/main/groovy does not need to + * define imports on Gradle 2.13/2.14+ ProgressLoggers */ -public class ClientYamlTestParseException extends Exception { +class ProgressLogger { + @Delegate org.gradle.internal.logging.progress.ProgressLogger progressLogger - ClientYamlTestParseException(String message) { - super(message); - } - - ClientYamlTestParseException(String message, Throwable cause) { - super(message, cause); + ProgressLogger(org.gradle.internal.logging.progress.ProgressLogger progressLogger) { + this.progressLogger = progressLogger } } diff --git a/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy new file mode 100644 index 00000000000..8891d65611a --- /dev/null +++ b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle + +import org.gradle.internal.logging.progress.ProgressLoggerFactory + +import javax.inject.Inject + +/** + * Allows injecting a ProgressLoggerFactory into tasks in src/main/groovy + * without requiring the corresponding import of ProgressLoggerFactory, + * making it compatible with both Gradle 2.13 and 2.14+. + */ +trait ProgressLoggerFactoryInjection { + @Inject + ProgressLoggerFactory getProgressLoggerFactory() { + throw new UnsupportedOperationException() + } +} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy index b28e7210ea4..ecb50ce80b5 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy @@ -8,6 +8,7 @@ import org.apache.tools.ant.BuildException import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.RuntimeConfigurable import org.apache.tools.ant.UnknownElement +import org.elasticsearch.gradle.ProgressLoggerFactoryInjection import org.gradle.api.DefaultTask import org.gradle.api.file.FileCollection import org.gradle.api.file.FileTreeElement @@ -19,12 +20,9 @@ import org.gradle.api.tasks.Optional import org.gradle.api.tasks.TaskAction import org.gradle.api.tasks.util.PatternFilterable import org.gradle.api.tasks.util.PatternSet -import org.gradle.logging.ProgressLoggerFactory import org.gradle.util.ConfigureUtil -import javax.inject.Inject - -class RandomizedTestingTask extends DefaultTask { +class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactoryInjection { // TODO: change to "executable" to match gradle test params? @Optional @@ -81,6 +79,7 @@ class RandomizedTestingTask extends DefaultTask { String argLine = null Map systemProperties = new HashMap<>() + Map environmentVariables = new HashMap<>() PatternFilterable patternSet = new PatternSet() RandomizedTestingTask() { @@ -89,11 +88,6 @@ class RandomizedTestingTask extends DefaultTask { listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig)) } - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException(); - } - void jvmArgs(Iterable arguments) { jvmArgs.addAll(arguments) } @@ -106,6 +100,10 @@ class RandomizedTestingTask extends DefaultTask { systemProperties.put(property, value) } + void environment(String key, Object value) { + environmentVariables.put(key, value) + } + void include(String...
includes) { this.patternSet.include(includes); } @@ -194,7 +192,8 @@ class RandomizedTestingTask extends DefaultTask { haltOnFailure: true, // we want to capture when a build failed, but will decide whether to rethrow later shuffleOnSlave: shuffleOnSlave, leaveTemporary: leaveTemporary, - ifNoTests: ifNoTests + ifNoTests: ifNoTests, + newenvironment: true ] DefaultLogger listener = null @@ -250,6 +249,9 @@ class RandomizedTestingTask extends DefaultTask { for (Map.Entry prop : systemProperties) { sysproperty key: prop.getKey(), value: prop.getValue().toString() } + for (Map.Entry envvar : environmentVariables) { + env key: envvar.getKey(), value: envvar.getValue().toString() + } makeListeners() } } catch (BuildException e) { diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy index 14f5d476be3..a9786935c56 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy @@ -25,8 +25,7 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import org.gradle.logging.ProgressLogger -import org.gradle.logging.ProgressLoggerFactory +import org.elasticsearch.gradle.ProgressLogger import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR @@ -52,8 +51,6 @@ import static java.lang.Math.max * quick. */ class TestProgressLogger implements AggregatedEventListener { - /** Factory to build a progress logger when testing starts */ - ProgressLoggerFactory factory ProgressLogger progressLogger int totalSuites int totalSlaves @@ -79,12 +76,15 @@ class TestProgressLogger implements AggregatedEventListener { /* Note that we probably overuse volatile here but it isn't hurting us and lets us move things around without worying about breaking things. 
*/ + TestProgressLogger(Map args) { + progressLogger = new ProgressLogger(args.factory.newOperation(TestProgressLogger)) + progressLogger.setDescription('Randomized test runner') + } + @Subscribe void onStart(AggregatedStartEvent e) throws IOException { totalSuites = e.suiteCount totalSlaves = e.slaveCount - progressLogger = factory.newOperation(TestProgressLogger) - progressLogger.setDescription('Randomized test runner') progressLogger.started() progressLogger.progress( "Starting JUnit4 for ${totalSuites} suites on ${totalSlaves} jvms") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 628e59de1a6..b49cb52ee7f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -18,9 +18,11 @@ */ package org.elasticsearch.gradle +import com.carrotsearch.gradle.junit4.RandomizedTestingTask import nebula.plugin.extraconfigurations.ProvidedBasePlugin import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion import org.gradle.api.Plugin import org.gradle.api.Project @@ -54,6 +56,11 @@ class BuildPlugin implements Plugin { @Override void apply(Project project) { + if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) { + throw new InvalidUserDataException('elasticsearch.standalone-test, ' + + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' + + 'are mutually exclusive') + } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') // these plugins add lots of info to our jars @@ -195,18 +202,13 @@ class BuildPlugin implements Plugin { /** Runs the given javascript using jjs from the jdk, and returns the output */ private static String runJavascript(Project project, String javaHome, String script) { - File tmpScript = File.createTempFile('es-gradle-tmp', '.js') - tmpScript.setText(script, 'UTF-8') ByteArrayOutputStream output = new ByteArrayOutputStream() - ExecResult result = project.exec { - executable = new File(javaHome, 'bin/jjs') - args tmpScript.toString() + project.exec { + executable = new File(javaHome, 'bin/jrunscript') + args '-e', script standardOutput = output errorOutput = new ByteArrayOutputStream() - ignoreExitValue = true // we do not fail so we can first cleanup the tmp file } - java.nio.file.Files.delete(tmpScript.toPath()) - result.assertNormalExitValue() return output.toString('UTF-8').trim() } @@ -412,8 +414,10 @@ class BuildPlugin implements Plugin { // hack until gradle supports java 9's new "--release" arg assert minimumJava == JavaVersion.VERSION_1_8 options.compilerArgs << '--release' << '8' - project.sourceCompatibility = null - project.targetCompatibility = null + doFirst{ + sourceCompatibility = null + targetCompatibility = null + } } } } @@ -509,11 +513,9 @@ class BuildPlugin implements Plugin { } } - // System assertions (-esa) are disabled for now because of what looks like a - // JDK bug triggered by Groovy on JDK7. We should look at re-enabling system - // assertions when we upgrade to a new version of Groovy (currently 2.4.4) or - // require JDK8. See https://issues.apache.org/jira/browse/GROOVY-7528.
- enableSystemAssertions false + boolean assertionsEnabled = Boolean.parseBoolean(System.getProperty('tests.asserts', 'true')) + enableSystemAssertions assertionsEnabled + enableAssertions assertionsEnabled testLogging { showNumFailuresAtEnd 25 @@ -554,11 +556,22 @@ class BuildPlugin implements Plugin { /** Configures the test task */ static Task configureTest(Project project) { - Task test = project.tasks.getByName('test') + RandomizedTestingTask test = project.tasks.getByName('test') test.configure(commonTestConfig(project)) test.configure { include '**/*Tests.class' } + + // Add a method to create additional unit tests for a project, which will share the same + // randomized testing setup, but by default run no tests. + project.extensions.add('additionalTest', { String name, Closure config -> + RandomizedTestingTask additionalTest = project.tasks.create(name, RandomizedTestingTask.class) + additionalTest.classpath = test.classpath + additionalTest.testClassesDir = test.testClassesDir + additionalTest.configure(commonTestConfig(project)) + additionalTest.configure(config) + test.dependsOn(additionalTest) + }); return test } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index a46a7bda374..66f9f0d4c4e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -30,6 +30,7 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { + project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) Map defaultSubstitutions = [ /* These match up with the asciidoc syntax for substitutions but diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index f451beeceb8..96b7ac42527 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -139,6 +139,7 @@ class PrecommitTasks { configProperties = [ suppressions: checkstyleSuppressions ] + toolVersion = 7.5 } for (String taskName : ['checkstyleMain', 'checkstyleTest']) { Task task = project.tasks.findByName(taskName) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index ca4957f7a6c..8d65f8c0d60 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -73,8 +73,8 @@ class ClusterConfiguration { @Input String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + - " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + - " " + System.getProperty('tests.jvm.argline', '') + " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + + " " + System.getProperty('tests.jvm.argline', '') /** * A closure to call which returns the unicast host to connect to for cluster formation. 
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 74cae08298b..3fc622ef5aa 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -39,6 +39,7 @@ import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec import java.nio.file.Paths +import java.util.concurrent.TimeUnit /** * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. @@ -91,6 +92,8 @@ class ClusterFormationTasks { configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(), project.configurations.elasticsearchBwcPlugins, config.bwcVersion) } + project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) + project.configurations.elasticsearchBwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) } for (int i = 0; i < config.numNodes; i++) { // we start N nodes and out of these N nodes there might be M bwc nodes. @@ -268,12 +271,12 @@ class ClusterFormationTasks { static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, NodeInfo seedNode) { Map esConfig = [ 'cluster.name' : node.clusterName, + 'node.name' : "node-" + node.nodeNum, 'pidfile' : node.pidFile, 'path.repo' : "${node.sharedDir}/repo", 'path.shared_data' : "${node.sharedDir}/", // Define a node attribute so we can test that it exists - 'node.attr.testattr' : 'test', - 'repositories.url.allowed_urls': 'http://snapshot.test*' + 'node.attr.testattr' : 'test' ] // we set min master nodes to the total number of nodes in the cluster and // basically skip initial state recovery to allow the cluster to form using a realistic master election diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy index 1cca2c5aa49..1c0aec1bc00 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/MessyTestPlugin.groovy @@ -48,7 +48,7 @@ class MessyTestPlugin extends StandaloneTestPlugin { } private static addPluginResources(Project project, Project pluginProject) { - String outputDir = "generated-resources/${pluginProject.name}" + String outputDir = "${project.buildDir}/generated-resources/${pluginProject.name}" String taskName = ClusterFormationTasks.pluginTaskName("copy", pluginProject.name, "Metadata") Copy copyPluginMetadata = project.tasks.create(taskName, Copy.class) copyPluginMetadata.into(outputDir) @@ -57,7 +57,7 @@ class MessyTestPlugin extends StandaloneTestPlugin { project.sourceSets.test.output.dir(outputDir, builtBy: taskName) // add each generated dir to the test classpath in IDEs - //project.eclipse.classpath.sourceSets = [project.sourceSets.test] project.idea.module.singleEntryLibraries= ['TEST': [project.file(outputDir)]] + // Eclipse doesn't need this because it gets the entire module as a dependency } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index a9473cc28d2..73f32961fb3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -151,6 +151,9 @@ class NodeInfo { args.addAll("-E", "node.portsfile=true") String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs + if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { + esJavaOpts += " -ea -esa" + } env.put('ES_JAVA_OPTS', esJavaOpts) for (Map.Entry property : System.properties.entrySet()) { if (property.key.startsWith('tests.es.')) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index dc9aa769388..47a559efccb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -18,15 +18,29 @@ */ package org.elasticsearch.gradle.test +import org.elasticsearch.gradle.BuildPlugin +import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project -/** A plugin to add rest integration tests. Used for qa projects. */ +/** + * Adds support for starting an Elasticsearch cluster before running integration + * tests. Used in conjunction with {@link StandaloneRestTestPlugin} for qa + * projects and in conjunction with {@link BuildPlugin} for testing the rest + * client. + */ public class RestTestPlugin implements Plugin { + List REQUIRED_PLUGINS = [ + 'elasticsearch.build', + 'elasticsearch.standalone-rest-test'] @Override public void apply(Project project) { - project.pluginManager.apply(StandaloneTestBasePlugin) + if (false == REQUIRED_PLUGINS.any {project.pluginManager.hasPlugin(it)}) { + throw new InvalidUserDataException('elasticsearch.rest-test ' + + 'requires either elasticsearch.build or ' + + 'elasticsearch.standalone-rest-test') + } RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) integTest.cluster.distribution = 'zip' // rest tests should run with the real zip diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy similarity index 77% rename from buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy rename to buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index af2b20e4abf..c48dc890ab0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -24,15 +24,26 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project +import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin -/** Configures the build to have a rest integration test. */ -public class StandaloneTestBasePlugin implements Plugin { +/** + * Configures the build to compile tests against Elasticsearch's test framework + * and run REST tests. Use BuildPlugin if you want to build main code as well + * as tests. 
+ */ +public class StandaloneRestTestPlugin implements Plugin { @Override public void apply(Project project) { + if (project.pluginManager.hasPlugin('elasticsearch.build')) { + throw new InvalidUserDataException('elasticsearch.standalone-test, ' + + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' + + 'are mutually exclusive') + } project.pluginManager.apply(JavaBasePlugin) project.pluginManager.apply(RandomizedTestingPlugin) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index fefd08fe4e5..de52d75c600 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -25,12 +25,15 @@ import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin -/** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */ +/** + * Configures the build to compile against Elasticsearch's test framework and + * run integration and unit tests. Use BuildPlugin if you want to build main + * code as well as tests. */ public class StandaloneTestPlugin implements Plugin { @Override public void apply(Project project) { - project.pluginManager.apply(StandaloneTestBasePlugin) + project.pluginManager.apply(StandaloneRestTestPlugin) Map testOptions = [ name: 'test', diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/TestWithDependenciesPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/TestWithDependenciesPlugin.groovy new file mode 100644 index 00000000000..7e370fd69e2 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/TestWithDependenciesPlugin.groovy @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.test + +import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.artifacts.Dependency +import org.gradle.api.artifacts.ProjectDependency +import org.gradle.api.tasks.Copy + +/** + * A plugin to run tests that depend on other plugins or modules. + * + * This plugin will add the plugin-metadata and properties files for each + * dependency to the test source set. + */ +class TestWithDependenciesPlugin implements Plugin { + + @Override + void apply(Project project) { + if (project.isEclipse) { + /* The changes this plugin makes both break and aren't needed by + * Eclipse. This is because Eclipse flattens main and test + * dependencies into a single dependency. Because Eclipse is + * "special"....
*/ + return + } + + project.configurations.testCompile.dependencies.all { Dependency dep -> + // this closure is run every time a compile dependency is added + if (dep instanceof ProjectDependency && dep.dependencyProject.plugins.hasPlugin(PluginBuildPlugin)) { + project.gradle.projectsEvaluated { + addPluginResources(project, dep.dependencyProject) + } + } + } + } + + private static addPluginResources(Project project, Project pluginProject) { + String outputDir = "${project.buildDir}/generated-resources/${pluginProject.name}" + String taskName = ClusterFormationTasks.pluginTaskName("copy", pluginProject.name, "Metadata") + Copy copyPluginMetadata = project.tasks.create(taskName, Copy.class) + copyPluginMetadata.into(outputDir) + copyPluginMetadata.from(pluginProject.tasks.pluginProperties) + copyPluginMetadata.from(pluginProject.file('src/main/plugin-metadata')) + project.sourceSets.test.output.dir(outputDir, builtBy: taskName) + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy index 3f980c57a49..85fd433bc77 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy @@ -20,10 +20,9 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream import groovy.transform.PackageScope +import org.elasticsearch.gradle.ProgressLogger import org.gradle.api.GradleScriptException import org.gradle.api.logging.Logger -import org.gradle.logging.ProgressLogger -import org.gradle.logging.ProgressLoggerFactory import java.util.regex.Matcher @@ -49,7 +48,7 @@ public class TapLoggerOutputStream extends LoggingOutputStream { TapLoggerOutputStream(Map args) { logger = args.logger - progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) + progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream)) progressLogger.setDescription("TAP output for `${args.command}`") } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy index ecba08d7d4c..cd4d4bf87a5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy @@ -19,17 +19,15 @@ package org.elasticsearch.gradle.vagrant import org.apache.commons.io.output.TeeOutputStream +import org.elasticsearch.gradle.ProgressLoggerFactoryInjection import org.elasticsearch.gradle.LoggedExec import org.gradle.api.tasks.Input -import org.gradle.logging.ProgressLoggerFactory - -import javax.inject.Inject /** * Runs a vagrant command. Pretty much like Exec task but with a nicer output * formatter and defaults to `vagrant` as first part of commandLine. 
*/ -public class VagrantCommandTask extends LoggedExec { +public class VagrantCommandTask extends LoggedExec implements ProgressLoggerFactoryInjection { @Input String boxName @@ -57,9 +55,4 @@ public class VagrantCommandTask extends LoggedExec { stuff starts with ==> $box */ squashedPrefix: "==> $boxName: ") } - - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException(); - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy index 331a638b5ca..de6c5a36db9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy @@ -19,9 +19,7 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream -import org.gradle.api.logging.Logger -import org.gradle.logging.ProgressLogger -import org.gradle.logging.ProgressLoggerFactory +import org.elasticsearch.gradle.ProgressLogger /** * Adapts an OutputStream being written to by vagrant into a ProcessLogger. It @@ -55,7 +53,7 @@ public class VagrantLoggerOutputStream extends LoggingOutputStream { private String heading = '' VagrantLoggerOutputStream(Map args) { - progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) + progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream)) progressLogger.setDescription("Vagrant output for `$args.command`") squashedPrefix = args.squashedPrefix } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index a5bb054a8b6..0b7a105e8ab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -404,10 +404,6 @@ class VagrantTestPlugin implements Plugin { args 'halt', box } stop.dependsOn(halt) - if (project.extensions.esvagrant.boxes.contains(box) == false) { - // we only need a halt task if this box was not specified - continue; - } Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) { boxName box @@ -435,6 +431,11 @@ class VagrantTestPlugin implements Plugin { dependsOn update } + if (project.extensions.esvagrant.boxes.contains(box) == false) { + // we don't need test tasks if this box was not specified + continue; + } + Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) { environment vagrantEnvVars dependsOn up diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties new file mode 100644 index 00000000000..2daf4dc27c0 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties @@ -0,0 +1,20 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +implementation-class=org.elasticsearch.gradle.test.StandaloneRestTestPlugin diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.test-with-dependencies.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.test-with-dependencies.properties new file mode 100644 index 00000000000..bcb374a85c6 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.test-with-dependencies.properties @@ -0,0 +1,20 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +implementation-class=org.elasticsearch.gradle.test.TestWithDependenciesPlugin diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index eba6dbfc819..a800411cc63 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -10,9 +10,6 @@ - - - @@ -29,7 +26,6 @@ - @@ -56,7 +52,6 @@ - @@ -135,15 +130,12 @@ - - - @@ -165,19 +157,16 @@ - - - @@ -195,7 +184,6 @@ - @@ -212,7 +200,6 @@ - @@ -234,7 +221,6 @@ - @@ -260,10 +246,8 @@ - - @@ -315,7 +299,6 @@ - @@ -338,7 +321,6 @@ - @@ -349,16 +331,13 @@ - - - @@ -392,7 +371,6 @@ - @@ -406,8 +384,6 @@ - - @@ -432,14 +408,12 @@ - - @@ -450,10 +424,7 @@ - - - @@ -475,18 +446,12 @@ - - - - - - @@ -498,7 +463,6 @@ - @@ -515,7 +479,6 @@ - @@ -548,7 +511,6 @@ - @@ -576,8 +538,6 @@ - - @@ -591,9 +551,6 @@ - - - @@ -669,7 +626,6 @@ - @@ -704,8 +660,6 @@ - - @@ -789,7 +743,6 @@ - @@ -801,7 +754,6 @@ - @@ -819,7 +771,6 @@ - @@ -924,7 +875,6 @@ - @@ -935,9 +885,8 @@ - - + @@ -994,13 +943,8 @@ - - - - - @@ -1012,7 +956,6 @@ - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 44835f7227c..ddbf87c4a71 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,10 +1,11 @@ +# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy elasticsearch = 6.0.0-alpha1 -lucene = 6.4.0-snapshot-ec38570 +lucene = 6.4.0 # optional dependencies spatial4j = 0.6 jts = 1.13 -jackson = 2.8.1 +jackson = 2.8.6 snakeyaml = 1.15 # When updating log4j, please update also docs/java-api/index.asciidoc log4j = 2.7 @@ -15,10 +16,16 @@ jna = 4.2.2 randomizedrunner = 2.4.0 junit = 4.11 httpclient = 4.5.2 +# When updating httpcore, please also update 
core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy httpcore = 4.4.5 +# When updating httpasyncclient, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +httpasyncclient = 4.1.2 commonslogging = 1.1.3 commonscodec = 1.10 hamcrest = 1.3 securemock = 1.2 +# When updating mocksocket, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +mocksocket = 1.1 + # benchmark dependencies jmh = 1.17.3 diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java index 214a75d12cc..e9cde26e6c8 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java @@ -95,7 +95,7 @@ public class BulkBenchmarkTask implements BenchmarkTask { private final BlockingQueue> bulkQueue; private final int bulkSize; - public LoadGenerator(Path bulkDataFile, BlockingQueue> bulkQueue, int bulkSize) { + LoadGenerator(Path bulkDataFile, BlockingQueue> bulkQueue, int bulkSize) { this.bulkDataFile = bulkDataFile; this.bulkQueue = bulkQueue; this.bulkSize = bulkSize; @@ -143,7 +143,7 @@ public class BulkBenchmarkTask implements BenchmarkTask { private final BulkRequestExecutor bulkRequestExecutor; private final SampleRecorder sampleRecorder; - public BulkIndexer(BlockingQueue> bulkData, int warmupIterations, int measurementIterations, + BulkIndexer(BlockingQueue> bulkData, int warmupIterations, int measurementIterations, SampleRecorder sampleRecorder, BulkRequestExecutor bulkRequestExecutor) { this.bulkData = bulkData; this.warmupIterations = warmupIterations; diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java index b342d93fba5..9210526e7c8 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java @@ -73,7 +73,7 @@ public final class RestClientBenchmark extends AbstractBenchmark { private final RestClient client; private final String actionMetaData; - public RestBulkRequestExecutor(RestClient client, String index, String type) { + RestBulkRequestExecutor(RestClient client, String index, String type) { this.client = client; this.actionMetaData = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\", \"_type\" : \"%s\" } }%n", index, type); } diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java index 6d6e5ade827..d2aee2251a6 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction; @@ -70,7 +71,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark bulkData) { NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client); for (String bulkItem : bulkData) { - builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8))); + builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON)); } BulkResponse bulkResponse; try { diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java index ac45f20dc25..e8ed27715c1 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java @@ -23,15 +23,23 @@ import org.elasticsearch.plugin.noop.action.bulk.RestNoopBulkAction; import org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.plugin.noop.action.search.NoopSearchAction; import org.elasticsearch.plugin.noop.action.search.RestNoopSearchAction; import org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import java.util.Arrays; import java.util.List; +import java.util.function.Supplier; public class NoopPlugin extends Plugin implements ActionPlugin { @Override @@ -43,7 +51,11 @@ public class NoopPlugin extends Plugin implements ActionPlugin { } @Override - public List> getRestHandlers() { - return Arrays.asList(RestNoopBulkAction.class, RestNoopSearchAction.class); + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + return Arrays.asList( + new RestNoopBulkAction(settings, restController), + new RestNoopSearchAction(settings, restController)); } } diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java index ceaf9f8cc9d..1034e722e87 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java @@ -33,6 +33,7 @@ import 
org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; public class NoopBulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { @@ -95,17 +96,17 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder { @@ -91,7 +90,7 @@ public class RestNoopBulkAction extends BaseRestHandler { private final RestRequest request; - public BulkRestBuilderListener(RestChannel channel, RestRequest request) { + BulkRestBuilderListener(RestChannel channel, RestRequest request) { super(channel); this.request = request; } @@ -103,9 +102,7 @@ public class RestNoopBulkAction extends BaseRestHandler { builder.field(Fields.ERRORS, false); builder.startArray(Fields.ITEMS); for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) { - builder.startObject(); ITEM_RESPONSE.toXContent(builder, request); - builder.endObject(); } builder.endArray(); builder.endObject(); diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java index 9bcde4ca399..48a453c3725 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.plugin.noop.action.search; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; @@ -33,8 +32,6 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestNoopSearchAction extends BaseRestHandler { - - @Inject public RestNoopSearchAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, "/_noop_search", this); diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle new file mode 100644 index 00000000000..162e8608d44 --- /dev/null +++ b/client/rest-high-level/build.gradle @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.rest-test' + +group = 'org.elasticsearch.client' + +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.client:rest:${version}" + + testCompile "org.elasticsearch.client:test:${version}" + testCompile "org.elasticsearch.test:framework:${version}" + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" +} + +dependencyLicenses { + // Don't check licenses for dependencies that are part of the elasticsearch project + // But any other dependency should have its license/notice/sha1 + dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SlashStrategy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java similarity index 50% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SlashStrategy.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 698a9dfc364..58ecc5f9c2d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SlashStrategy.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -17,33 +17,37 @@ * under the License. */ -package org.elasticsearch.painless.antlr; +package org.elasticsearch.client; -import org.antlr.v4.runtime.Token; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.Header; + +import java.io.IOException; +import java.util.Objects; /** - * Utility to figure out if a {@code /} is division or the start of a regex literal. + * High level REST client that wraps an instance of the low level {@link RestClient} and allows building requests and reading responses. + * The provided {@link RestClient} is externally built and closed. */ -public class SlashStrategy { - public static boolean slashIsRegex(PainlessLexer lexer) { - EnhancedPainlessLexer realLexer = (EnhancedPainlessLexer) lexer; - Token lastToken = realLexer.getPreviousToken(); - if (lastToken == null) { +public final class RestHighLevelClient { + + private static final Log logger = LogFactory.getLog(RestHighLevelClient.class); + + private final RestClient client; + + public RestHighLevelClient(RestClient client) { + this.client = Objects.requireNonNull(client); + } + + public boolean ping(Header...
headers) { + try { + client.performRequest("HEAD", "/", headers); return true; - } - switch (lastToken.getType()) { - case PainlessLexer.RBRACE: - case PainlessLexer.RP: - case PainlessLexer.OCTAL: - case PainlessLexer.HEX: - case PainlessLexer.INTEGER: - case PainlessLexer.DECIMAL: - case PainlessLexer.ID: - case PainlessLexer.DOTINTEGER: - case PainlessLexer.DOTID: + } catch(IOException exception) { return false; - default: - return true; } } + + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java similarity index 53% rename from test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index f456c32d092..bc12b1433d7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -16,23 +16,33 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.test.rest.yaml.parser; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.test.rest.yaml.section.MatchAssertion; +package org.elasticsearch.client; + +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; import java.io.IOException; -/** - * Parser for match assert sections - */ -public class MatchParser implements ClientYamlTestFragmentParser { +public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { - @Override - public MatchAssertion parse(ClientYamlTestSuiteParseContext parseContext) throws IOException, ClientYamlTestParseException { - XContentLocation location = parseContext.parser().getTokenLocation(); - Tuple stringObjectTuple = parseContext.parseTuple(); - return new MatchAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); + private static RestHighLevelClient restHighLevelClient; + + @Before + public void initHighLevelClient() throws IOException { + super.initClient(); + if (restHighLevelClient == null) { + restHighLevelClient = new RestHighLevelClient(client()); + } + } + + @AfterClass + public static void cleanupClient() throws IOException { + restHighLevelClient = null; + } + + protected static RestHighLevelClient highLevelClient() { + return restHighLevelClient; } } diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerAdapter.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java similarity index 71% rename from core/src/main/java/org/elasticsearch/http/HttpServerAdapter.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java index a7e61143893..717ab7a44f3 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpServerAdapter.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java @@ -17,14 +17,11 @@ * under the License. 
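The new `RestHighLevelClient` above is deliberately thin: it wraps an externally built low level `RestClient` and leaves that client's lifecycle to the caller. A minimal usage sketch against the public API introduced in this patch (host and port are illustrative, not part of the change):

---------------------------------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

// the caller builds and closes the low level client; the wrapper never does
try (RestClient lowLevelClient = RestClient.builder(
        new HttpHost("localhost", 9200, "http")).build()) {
    RestHighLevelClient client = new RestHighLevelClient(lowLevelClient);
    // ping() issues HEAD / and maps an IOException to false
    boolean alive = client.ping();
}
---------------------------------------------------------------------------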
*/ -package org.elasticsearch.http; +package org.elasticsearch.client; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestRequest; - -public interface HttpServerAdapter { - - void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext context); +public class MainActionIT extends ESRestHighLevelClientTestCase { + public void testPing() { + assertTrue(highLevelClient().ping()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java new file mode 100644 index 00000000000..7d513e48998 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentMatcher; +import org.mockito.internal.matchers.ArrayEquals; +import org.mockito.internal.matchers.VarargMatcher; + +import java.io.IOException; +import java.net.SocketTimeoutException; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RestHighLevelClientTests extends ESTestCase { + + private RestClient restClient; + private RestHighLevelClient restHighLevelClient; + + @Before + public void initClient() throws IOException { + restClient = mock(RestClient.class); + restHighLevelClient = new RestHighLevelClient(restClient); + } + + public void testPing() throws IOException { + assertTrue(restHighLevelClient.ping()); + verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher())); + } + + public void testPingFailure() throws IOException { + when(restClient.performRequest(any(), any())).thenThrow(new IllegalStateException()); + expectThrows(IllegalStateException.class, () -> restHighLevelClient.ping()); + } + + public void testPingFailed() throws IOException { + when(restClient.performRequest(any(), any())).thenThrow(new SocketTimeoutException()); + assertFalse(restHighLevelClient.ping()); + } + + public void testPingWithHeaders() throws IOException { + Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header"); + assertTrue(restHighLevelClient.ping(headers)); + verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher(headers))); + } + + private class HeadersVarargMatcher extends ArgumentMatcher 
implements VarargMatcher { + private Header[] expectedHeaders; + + HeadersVarargMatcher(Header... expectedHeaders) { + this.expectedHeaders = expectedHeaders; + } + + @Override + public boolean matches(Object varargArgument) { + if (varargArgument instanceof Header[]) { + Header[] actualHeaders = (Header[]) varargArgument; + return new ArrayEquals(expectedHeaders).matches(actualHeaders); + } + return false; + } + } +} diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 1c92013da97..d5d9c9cfbb5 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -33,7 +33,7 @@ group = 'org.elasticsearch.client' dependencies { compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" - compile "org.apache.httpcomponents:httpasyncclient:4.1.2" + compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-logging:commons-logging:${versions.commonslogging}" @@ -43,6 +43,7 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } diff --git a/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java index a5e5b39bed5..528fb9a7fc8 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java +++ b/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java @@ -53,7 +53,7 @@ interface HttpAsyncResponseConsumerFactory { private final int bufferLimit; - public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { + HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { this.bufferLimit = bufferLimitBytes; } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 89c3309dbbd..f808c36d60a 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -25,6 +25,7 @@ import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpRequest; import org.apache.http.HttpResponse; +import org.apache.http.client.AuthCache; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import org.apache.http.client.methods.HttpHead; @@ -34,8 +35,11 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.methods.HttpTrace; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.utils.URIBuilder; import org.apache.http.concurrent.FutureCallback; +import org.apache.http.impl.auth.BasicScheme; +import org.apache.http.impl.client.BasicAuthCache; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.nio.client.methods.HttpAsyncMethods; import 
org.apache.http.nio.protocol.HttpAsyncRequestProducer; @@ -49,6 +53,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -91,7 +96,7 @@ public class RestClient implements Closeable { private final long maxRetryTimeoutMillis; private final String pathPrefix; private final AtomicInteger lastHostIndex = new AtomicInteger(0); - private volatile Set hosts; + private volatile HostTuple> hostTuple; private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private final FailureListener failureListener; @@ -121,11 +126,13 @@ public class RestClient implements Closeable { throw new IllegalArgumentException("hosts must not be null nor empty"); } Set httpHosts = new HashSet<>(); + AuthCache authCache = new BasicAuthCache(); for (HttpHost host : hosts) { Objects.requireNonNull(host, "host cannot be null"); httpHosts.add(host); + authCache.put(host, new BasicScheme()); } - this.hosts = Collections.unmodifiableSet(httpHosts); + this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache); this.blacklist.clear(); } @@ -282,29 +289,61 @@ public class RestClient implements Closeable { public void performRequestAsync(String method, String endpoint, Map params, HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, ResponseListener responseListener, Header... headers) { - URI uri = buildUri(pathPrefix, endpoint, params); + Objects.requireNonNull(params, "params must not be null"); + Map requestParams = new HashMap<>(params); + //ignore is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = requestParams.remove("ignore"); + Set ignoreErrorCodes; + if (ignoreString == null) { + if (HttpHead.METHOD_NAME.equals(method)) { + //404 never causes error if returned for a HEAD request + ignoreErrorCodes = Collections.singleton(404); + } else { + ignoreErrorCodes = Collections.emptySet(); + } + } else { + String[] ignoresArray = ignoreString.split(","); + ignoreErrorCodes = new HashSet<>(); + if (HttpHead.METHOD_NAME.equals(method)) { + //404 never causes error if returned for a HEAD request + ignoreErrorCodes.add(404); + } + for (String ignoreCode : ignoresArray) { + try { + ignoreErrorCodes.add(Integer.valueOf(ignoreCode)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e); + } + } + } + URI uri = buildUri(pathPrefix, endpoint, requestParams); HttpRequestBase request = createHttpRequest(method, uri, entity); setHeaders(request, headers); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost().iterator(), request, httpAsyncResponseConsumerFactory, failureTrackingResponseListener); + performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, + failureTrackingResponseListener); } - private void performRequestAsync(final long startTime, final Iterator hosts, final HttpRequestBase request, + private void performRequestAsync(final long startTime, final HostTuple> hostTuple, final HttpRequestBase request, + final Set ignoreErrorCodes, final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, final FailureTrackingResponseListener listener) { - final 
HttpHost host = hosts.next(); + final HttpHost host = hostTuple.hosts.next(); //we stream the request body if the entity allows for it - HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); - HttpAsyncResponseConsumer asyncResponseConsumer = httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); - client.execute(requestProducer, asyncResponseConsumer, new FutureCallback() { + final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); + final HttpAsyncResponseConsumer asyncResponseConsumer = + httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); + final HttpClientContext context = HttpClientContext.create(); + context.setAuthCache(hostTuple.authCache); + client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback() { @Override public void completed(HttpResponse httpResponse) { try { RequestLogger.logResponse(logger, request, host, httpResponse); int statusCode = httpResponse.getStatusLine().getStatusCode(); Response response = new Response(request.getRequestLine(), host, httpResponse); - if (isSuccessfulResponse(request.getMethod(), statusCode)) { + if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { onResponse(host); listener.onSuccess(response); } else { @@ -312,7 +351,7 @@ public class RestClient implements Closeable { if (isRetryStatus(statusCode)) { //mark host dead and retry against next one onFailure(host); - retryIfPossible(responseException, hosts, request); + retryIfPossible(responseException); } else { //mark host alive and don't retry, as the error should be a request problem onResponse(host); @@ -329,14 +368,14 @@ public class RestClient implements Closeable { try { RequestLogger.logFailedRequest(logger, request, host, failure); onFailure(host); - retryIfPossible(failure, hosts, request); + retryIfPossible(failure); } catch(Exception e) { listener.onDefinitiveFailure(e); } } - private void retryIfPossible(Exception exception, Iterator hosts, HttpRequestBase request) { - if (hosts.hasNext()) { + private void retryIfPossible(Exception exception) { + if (hostTuple.hosts.hasNext()) { //in case we are retrying, check whether maxRetryTimeout has been reached long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); long timeout = maxRetryTimeoutMillis - timeElapsedMillis; @@ -347,7 +386,7 @@ public class RestClient implements Closeable { } else { listener.trackFailure(exception); request.reset(); - performRequestAsync(startTime, hosts, request, httpAsyncResponseConsumerFactory, listener); + performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); } } else { listener.onDefinitiveFailure(exception); @@ -385,17 +424,18 @@ public class RestClient implements Closeable { * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be retried, * one dead host gets returned so that it can be retried. */
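The `ignore` parameter handled above gives callers a way to suppress the `ResponseException` for chosen status codes; it is a client-side convention stripped from the parameters before the request is sent to Elasticsearch, and 404 is always tolerated for HEAD. A short sketch of how a caller would use it (endpoint and codes are illustrative):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.client.Response;

Map<String, String> params = Collections.singletonMap("ignore", "404,409");
// a 404 or 409 now comes back as a regular Response instead of an exception
Response response = restClient.performRequest("GET", "/index/type/1", params);
int status = response.getStatusLine().getStatusCode();
---------------------------------------------------------------------------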
- private Iterable nextHost() { + private HostTuple> nextHost() { + final HostTuple> hostTuple = this.hostTuple; Collection nextHosts = Collections.emptySet(); do { - Set filteredHosts = new HashSet<>(hosts); + Set filteredHosts = new HashSet<>(hostTuple.hosts); for (Map.Entry entry : blacklist.entrySet()) { if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) { filteredHosts.remove(entry.getKey()); } } if (filteredHosts.isEmpty()) { - //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried + //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried List> sortedHosts = new ArrayList<>(blacklist.entrySet()); if (sortedHosts.size() > 0) { Collections.sort(sortedHosts, new Comparator>() { @@ -414,7 +454,7 @@ public class RestClient implements Closeable { nextHosts = rotatedHosts; } } while(nextHosts.isEmpty()); - return nextHosts; + return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache); } /** @@ -452,8 +492,8 @@ public class RestClient implements Closeable { client.close(); } - private static boolean isSuccessfulResponse(String method, int statusCode) { - return statusCode < 300 || (HttpHead.METHOD_NAME.equals(method) && statusCode == 404); + private static boolean isSuccessfulResponse(int statusCode) { + return statusCode < 300; } private static boolean isRetryStatus(int statusCode) { @@ -510,7 +550,6 @@ public class RestClient implements Closeable { } private static URI buildUri(String pathPrefix, String path, Map params) { - Objects.requireNonNull(params, "params must not be null"); Objects.requireNonNull(path, "path must not be null"); try { String fullPath; @@ -657,4 +696,18 @@ public class RestClient implements Closeable { } } + + /** + * {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread + * safe, volatile way. 
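The point of `HostTuple`, whose body follows just below, is that the host set and the `AuthCache` must always be observed together: one immutable holder published through a single volatile write gives every reader a consistent snapshot without locking. A generic sketch of the same idiom, with hypothetical names that are not part of the patch:

---------------------------------------------------------------------------
// hypothetical illustration of the publish-one-immutable-snapshot idiom
final class Snapshot<A, B> {
    final A hosts;
    final B authCache;
    Snapshot(A hosts, B authCache) {
        this.hosts = hosts;
        this.authCache = authCache;
    }
}

final class Holder<A, B> {
    private volatile Snapshot<A, B> snapshot;
    void set(A hosts, B authCache) {
        snapshot = new Snapshot<>(hosts, authCache); // one atomic publication
    }
    Snapshot<A, B> get() {
        return snapshot; // both fields come from the same write
    }
}
---------------------------------------------------------------------------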
+ */ + private static class HostTuple { + public final T hosts; + public final AuthCache authCache; + + HostTuple(final T hosts, final AuthCache authCache) { + this.hosts = hosts; + this.authCache = authCache; + } + } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index d881bd70a44..4466a61d9df 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -28,6 +28,8 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Objects; /** @@ -177,7 +179,12 @@ public final class RestClientBuilder { if (failureListener == null) { failureListener = new RestClient.FailureListener(); } - CloseableHttpAsyncClient httpClient = createHttpClient(); + CloseableHttpAsyncClient httpClient = AccessController.doPrivileged(new PrivilegedAction() { + @Override + public CloseableHttpAsyncClient run() { + return createHttpClient(); + } + }); RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener); httpClient.start(); return restClient; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index f997f798712..da5a960c2e8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -24,6 +24,7 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.apache.http.HttpHost; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -80,7 +81,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { } private static HttpServer createHttpServer() throws Exception { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index 90ee4431009..6f87a244ff5 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -26,8 +26,10 @@ import org.apache.http.HttpResponse; import org.apache.http.ProtocolVersion; import org.apache.http.StatusLine; import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.concurrent.FutureCallback; import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.impl.auth.BasicScheme; import 
org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.message.BasicHttpResponse; import org.apache.http.message.BasicStatusLine; @@ -73,13 +75,15 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { public void createRestClient() throws IOException { CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), - any(FutureCallback.class))).thenAnswer(new Answer>() { + any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer>() { @Override public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); HttpHost httpHost = requestProducer.getTarget(); - FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[2]; + HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; + assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); + FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; //return the desired status code or exception depending on the path if (request.getURI().getPath().equals("/soe")) { futureCallback.failed(new SocketTimeoutException(httpHost.toString())); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 4440c1e8f97..e75de2f609c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -26,9 +26,14 @@ import com.sun.net.httpserver.HttpServer; import org.apache.http.Consts; import org.apache.http.Header; import org.apache.http.HttpHost; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.util.EntityUtils; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -39,7 +44,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -48,8 +52,10 @@ import java.util.Set; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; /** @@ -67,27 +73,14 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { @BeforeClass public static void startHttpServer() throws Exception { - String pathPrefixWithoutLeadingSlash; - if (randomBoolean()) { - 
pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); - pathPrefix = "/" + pathPrefixWithoutLeadingSlash; - } else { - pathPrefix = pathPrefixWithoutLeadingSlash = ""; - } - + pathPrefix = randomBoolean() ? "/testPathPrefix/" + randomAsciiOfLengthBetween(1, 5) : ""; httpServer = createHttpServer(); - int numHeaders = randomIntBetween(0, 5); - defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); - RestClientBuilder restClientBuilder = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders); - if (pathPrefix.length() > 0) { - restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash); - } - restClient = restClientBuilder.build(); + defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); + restClient = createRestClient(false, true); } private static HttpServer createHttpServer() throws Exception { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { @@ -131,6 +124,35 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } } + private static RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) { + // provide the username/password for every request + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass")); + + final RestClientBuilder restClientBuilder = RestClient.builder( + new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders); + if (pathPrefix.length() > 0) { + // sometimes cut off the leading slash + restClientBuilder.setPathPrefix(randomBoolean() ? 
pathPrefix.substring(1) : pathPrefix); + } + + if (useAuth) { + restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { + @Override + public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) { + if (usePreemptiveAuth == false) { + // disable preemptive auth by ignoring any authcache + httpClientBuilder.disableAuthCaching(); + } + + return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); + } + }); + } + + return restClientBuilder.build(); + } + @AfterClass public static void stopHttpServers() throws IOException { restClient.close(); @@ -150,42 +172,25 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { if (method.equals("HEAD") == false) { standardHeaders.add("Content-length"); } - - final int numHeaders = randomIntBetween(1, 5); - final Header[] headers = generateHeaders("Header", "Header-array", numHeaders); - final Map> expectedHeaders = new HashMap<>(); - - addHeaders(expectedHeaders, defaultHeaders, headers); - + final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), headers); + esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); } catch(ResponseException e) { esResponse = e.getResponse(); } assertEquals(method, esResponse.getRequestLine().getMethod()); assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); - assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri()); - + assertEquals(pathPrefix + "/" + statusCode, esResponse.getRequestLine().getUri()); + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), standardHeaders); for (final Header responseHeader : esResponse.getHeaders()) { - final String name = responseHeader.getName(); - final String value = responseHeader.getValue(); - if (name.startsWith("Header")) { - final List values = expectedHeaders.get(name); - assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); - assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); - - // we've collected them all - if (values.isEmpty()) { - expectedHeaders.remove(name); - } - } else { + String name = responseHeader.getName(); + if (name.startsWith("Header") == false) { assertTrue("unknown header was returned " + name, standardHeaders.remove(name)); } } - assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty()); assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty()); } } @@ -208,7 +213,41 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { bodyTest("GET"); } - private void bodyTest(String method) throws IOException { + /** + * Verify that credentials are sent on the first request with preemptive auth enabled (default when provided with credentials). 
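Outside the test harness, the same knobs are plain `RestClientBuilder` API: setting a credentials provider enables preemptive basic auth by default, and `disableAuthCaching()` reverts to challenge-based auth so credentials are only sent after a 401. A sketch with placeholder host and credentials:

---------------------------------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
        new UsernamePasswordCredentials("user", "pass"));

RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
    .setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
        @Override
        public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
            // drop the next line to keep preemptive auth enabled
            httpClientBuilder.disableAuthCaching();
            return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
        }
    })
    .build();
---------------------------------------------------------------------------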
+ */ + public void testPreemptiveAuthEnabled() throws IOException { + final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + + try (RestClient restClient = createRestClient(true, true)) { + for (final String method : methods) { + final Response response = bodyTest(restClient, method); + + assertThat(response.getHeader("Authorization"), startsWith("Basic")); + } + } + } + + /** + * Verify that credentials are not sent on the first request with preemptive auth disabled. + */ + public void testPreemptiveAuthDisabled() throws IOException { + final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + + try (RestClient restClient = createRestClient(true, false)) { + for (final String method : methods) { + final Response response = bodyTest(restClient, method); + + assertThat(response.getHeader("Authorization"), nullValue()); + } + } + } + + private Response bodyTest(final String method) throws IOException { + return bodyTest(restClient, method); + } + + private Response bodyTest(final RestClient restClient, final String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; StringEntity entity = new StringEntity(requestBody); int statusCode = randomStatusCode(getRandom()); @@ -220,7 +259,9 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } assertEquals(method, esResponse.getRequestLine().getMethod()); assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); - assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri()); + assertEquals(pathPrefix + "/" + statusCode, esResponse.getRequestLine().getUri()); assertEquals(requestBody, EntityUtils.toString(esResponse.getEntity())); + + return esResponse; } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index ce0d6d0936e..69048988ee9 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -34,10 +34,12 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpTrace; import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.utils.URIBuilder; import org.apache.http.concurrent.FutureCallback; import org.apache.http.conn.ConnectTimeoutException; import org.apache.http.entity.StringEntity; +import org.apache.http.impl.auth.BasicScheme; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.message.BasicHttpResponse; import org.apache.http.message.BasicStatusLine; @@ -56,7 +58,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Future; @@ -70,7 +71,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -98,11 +98,13 @@ public class RestClientSingleHostTests extends RestClientTestCase { public void 
createRestClient() throws IOException { httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), - any(FutureCallback.class))).thenAnswer(new Answer>() { + any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer>() { @Override public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[2]; + HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; + assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); + FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); //return the desired status code or exception depending on the path if (request.getURI().getPath().equals("/soe")) { @@ -131,9 +133,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { } }); - - int numHeaders = randomIntBetween(0, 3); - defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); + defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); httpHost = new HttpHost("localhost", 9200); failureListener = new HostsTrackingFailureListener(); restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); @@ -160,7 +160,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { for (String httpMethod : getHttpMethods()) { HttpUriRequest expectedRequest = performRandomRequest(httpMethod); verify(httpClient, times(++times)).execute(requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), any(FutureCallback.class)); + any(HttpAsyncResponseConsumer.class), any(HttpClientContext.class), any(FutureCallback.class)); HttpUriRequest actualRequest = (HttpUriRequest)requestArgumentCaptor.getValue().generateRequest(); assertEquals(expectedRequest.getURI(), actualRequest.getURI()); assertEquals(expectedRequest.getClass(), actualRequest.getClass()); @@ -220,23 +220,45 @@ public class RestClientSingleHostTests extends RestClientTestCase { */ public void testErrorStatusCodes() throws IOException { for (String method : getHttpMethods()) { + Set expectedIgnores = new HashSet<>(); + String ignoreParam = ""; + if (HttpHead.METHOD_NAME.equals(method)) { + expectedIgnores.add(404); + } + if (randomBoolean()) { + int numIgnores = randomIntBetween(1, 3); + for (int i = 0; i < numIgnores; i++) { + Integer code = randomFrom(getAllErrorStatusCodes()); + expectedIgnores.add(code); + ignoreParam += code; + if (i < numIgnores - 1) { + ignoreParam += ","; + } + } + } //error status codes should cause an exception to be thrown for (int errorStatusCode : getAllErrorStatusCodes()) { try { - Response response = performRequest(method, "/" + errorStatusCode); - if (method.equals("HEAD") && errorStatusCode == 404) { - //no exception gets thrown although we got a 404 - assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + Map params; + if (ignoreParam.isEmpty()) { + params = Collections.emptyMap(); + } else { + params = Collections.singletonMap("ignore", ignoreParam); + } + Response response = performRequest(method, "/" + errorStatusCode, params); + if (expectedIgnores.contains(errorStatusCode)) { + //no exception gets thrown 
although we got an error status code, as it was configured to be ignored + assertEquals(errorStatusCode, response.getStatusLine().getStatusCode()); } else { fail("request should have failed"); } } catch(ResponseException e) { - if (method.equals("HEAD") && errorStatusCode == 404) { + if (expectedIgnores.contains(errorStatusCode)) { throw e; } - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + assertEquals(errorStatusCode, e.getResponse().getStatusLine().getStatusCode()); } - if (errorStatusCode <= 500) { + if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { failureListener.assertNotCalled(); } else { failureListener.assertCalled(httpHost); @@ -339,44 +361,26 @@ public class RestClientSingleHostTests extends RestClientTestCase { */ public void testHeaders() throws IOException { for (String method : getHttpMethods()) { - final int numHeaders = randomIntBetween(1, 5); - final Header[] headers = generateHeaders("Header", null, numHeaders); - final Map> expectedHeaders = new HashMap<>(); - - addHeaders(expectedHeaders, defaultHeaders, headers); - + final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, headers); + esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders); } catch(ResponseException e) { esResponse = e.getResponse(); } assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); - for (Header responseHeader : esResponse.getHeaders()) { - final String name = responseHeader.getName(); - final String value = responseHeader.getValue(); - final List values = expectedHeaders.get(name); - assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); - assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); - - // we've collected them all - if (values.isEmpty()) { - expectedHeaders.remove(name); - } - } - assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty()); + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), Collections.emptySet()); } } private HttpUriRequest performRandomRequest(String method) throws Exception { String uriAsString = "/" + randomStatusCode(getRandom()); URIBuilder uriBuilder = new URIBuilder(uriAsString); - Map params = Collections.emptyMap(); + final Map params = new HashMap<>(); boolean hasParams = randomBoolean(); if (hasParams) { int numParams = randomIntBetween(1, 3); - params = new HashMap<>(numParams); for (int i = 0; i < numParams; i++) { String paramKey = "param-" + i; String paramValue = randomAsciiOfLengthBetween(3, 10); @@ -384,6 +388,14 @@ public class RestClientSingleHostTests extends RestClientTestCase { uriBuilder.addParameter(paramKey, paramValue); } } + if (randomBoolean()) { + //randomly add some ignore parameter, which doesn't get sent as part of the request + String ignore = Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes())); + if (randomBoolean()) { + ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes())); + } + params.put("ignore", ignore); + } URI uri = uriBuilder.build(); HttpUriRequest request; @@ -424,10 +436,9 @@ public class RestClientSingleHostTests extends RestClientTestCase { } Header[] headers = new Header[0]; - final int numHeaders = randomIntBetween(1, 
5); - final Set uniqueNames = new HashSet<>(numHeaders); + final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { - headers = generateHeaders("Header", "Header-array", numHeaders); + headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); for (Header header : headers) { request.addHeader(header); uniqueNames.add(header.getName()); @@ -455,16 +466,25 @@ public class RestClientSingleHostTests extends RestClientTestCase { } private Response performRequest(String method, String endpoint, Header... headers) throws IOException { - switch(randomIntBetween(0, 2)) { + return performRequest(method, endpoint, Collections.emptyMap(), headers); + } + + private Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { + int methodSelector; + if (params.isEmpty()) { + methodSelector = randomIntBetween(0, 2); + } else { + methodSelector = randomIntBetween(1, 2); + } + switch(methodSelector) { case 0: return restClient.performRequest(method, endpoint, headers); case 1: - return restClient.performRequest(method, endpoint, Collections.emptyMap(), headers); + return restClient.performRequest(method, endpoint, params, headers); case 2: - return restClient.performRequest(method, endpoint, Collections.emptyMap(), (HttpEntity)null, headers); + return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers); default: throw new UnsupportedOperationException(); } } - } diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index f35110e4f9e..5542792835b 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -43,6 +43,7 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } diff --git a/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 deleted file mode 100644 index b92131d6fab..00000000000 --- a/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd13b1c033741d48291315c6370f7d475a42dccf \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 b/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 new file mode 100644 index 00000000000..af7677d13c2 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 @@ -0,0 +1 @@ +2ef7b1cc34de149600f5e75bc2d5bf40de894e60 \ No newline at end of file diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index aeb0620134b..5221b205dd4 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.After; import org.junit.Before; @@ -141,7 +142,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private 
static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse)); return httpServer; } diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java index 4296932a002..6a2a45ef281 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java @@ -30,16 +30,19 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; - import org.apache.http.Header; -import org.apache.http.message.BasicHeader; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + @TestMethodProviders({ JUnit3MethodProvider.class }) @@ -53,70 +56,56 @@ import java.util.Set; public abstract class RestClientTestCase extends RandomizedTest { /** - * Create the specified number of {@link Header}s. - *

- * Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied. + * Assert that the actual headers are the expected ones given the original default and request headers. Some headers can be ignored, + * for instance in case the http client is adding its own automatically. * - * @param baseName The base name to use for all headers. - * @param arrayName The optional ({@code null}able) array name to use randomly. - * @param headers The number of headers to create. - * @return Never {@code null}. + * @param defaultHeaders the default headers set to the REST client instance + * @param requestHeaders the request headers sent with a particular request + * @param actualHeaders the actual headers as a result of the provided default and request headers + * @param ignoreHeaders header keys to be ignored as they are not part of default nor request headers, yet they + * will be part of the actual ones */ - protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) { - final Header[] generated = new Header[headers]; - for (int i = 0; i < headers; i++) { - String headerName = baseName + i; - if (arrayName != null && rarely()) { - headerName = arrayName; - } - - generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10)); + protected static void assertHeaders(final Header[] defaultHeaders, final Header[] requestHeaders, + final Header[] actualHeaders, final Set ignoreHeaders) { + final Map> expectedHeaders = new HashMap<>(); + final Set requestHeaderKeys = new HashSet<>(); + for (final Header header : requestHeaders) { + final String name = header.getName(); + addValueToListEntry(expectedHeaders, name, header.getValue()); + requestHeaderKeys.add(name); } - return generated; + for (final Header defaultHeader : defaultHeaders) { + final String name = defaultHeader.getName(); + if (requestHeaderKeys.contains(name) == false) { + addValueToListEntry(expectedHeaders, name, defaultHeader.getValue()); + } + } + Set actualIgnoredHeaders = new HashSet<>(); + for (Header responseHeader : actualHeaders) { + final String name = responseHeader.getName(); + if (ignoreHeaders.contains(name)) { + expectedHeaders.remove(name); + actualIgnoredHeaders.add(name); + continue; + } + final String value = responseHeader.getValue(); + final List values = expectedHeaders.get(name); + assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); + assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); + if (values.isEmpty()) { + expectedHeaders.remove(name); + } + } + assertEquals("some headers meant to be ignored were not part of the actual headers", ignoreHeaders, actualIgnoredHeaders); + assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty()); } - /** - * Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list. - * - * @param map The map to manipulate. - * @param name The name to create/append the list for. - * @param value The value to add. 
- */ - private static void createOrAppendList(final Map> map, final String name, final String value) { + private static void addValueToListEntry(final Map> map, final String name, final String value) { List values = map.get(name); - if (values == null) { values = new ArrayList<>(); map.put(name, values); } - values.add(value); } - - /** - * Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist. - *

- * If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its - * {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}. - * - * @param map The map to build with name/value(s) pairs. - * @param defaultHeaders The headers to add to the map representing default headers. - * @param headers The headers to add to the map representing request-level headers. - * @see #createOrAppendList(Map, String, String) - */ - protected static void addHeaders(final Map> map, final Header[] defaultHeaders, final Header[] headers) { - final Set uniqueHeaders = new HashSet<>(); - for (final Header header : headers) { - final String name = header.getName(); - createOrAppendList(map, name, header.getValue()); - uniqueHeaders.add(name); - } - for (final Header defaultHeader : defaultHeaders) { - final String name = defaultHeader.getName(); - if (uniqueHeaders.contains(name) == false) { - createOrAppendList(map, name, defaultHeader.getValue()); - } - } - } - } diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java index 4d4aa00f492..a0a6641abbc 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java @@ -19,7 +19,11 @@ package org.elasticsearch.client; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; import java.util.ArrayList; import java.util.Arrays; @@ -55,7 +59,7 @@ final class RestClientTestUtil { } static int randomStatusCode(Random random) { - return RandomPicks.randomFrom(random, ALL_ERROR_STATUS_CODES); + return RandomPicks.randomFrom(random, ALL_STATUS_CODES); } static int randomOkStatusCode(Random random) { @@ -81,4 +85,23 @@ final class RestClientTestUtil { static List getAllStatusCodes() { return ALL_STATUS_CODES; } + + /** + * Create a random number of {@link Header}s. + * Generated header names will either be the {@code baseName} plus its index, or exactly the provided {@code baseName}, so that + * we also test support for multiple headers with the same key and different values. 
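HTTP permits several headers with the same name, which is why the `randomHeaders` helper documented above (body just below) sometimes reuses the bare `baseName` instead of suffixing an index. A tiny sketch of the kind of collision it produces (values are illustrative):

---------------------------------------------------------------------------
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

// two request headers sharing one name, each keeping its own value
Header[] headers = new Header[] {
    new BasicHeader("Header", "value-a"),
    new BasicHeader("Header", "value-b"),
};
---------------------------------------------------------------------------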
+ */ + static Header[] randomHeaders(Random random, final String baseName) { + int numHeaders = RandomNumbers.randomIntBetween(random, 0, 5); + final Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = baseName; + //randomly exercise the code path that supports multiple headers with same key + if (random.nextBoolean()) { + headerName = headerName + i; + } + headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)); + } + return headers; + } } diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index ca90723ae82..3233470a253 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -22,6 +22,7 @@ package org.elasticsearch.transport.client; import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.ReindexPlugin; @@ -36,41 +37,70 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; /** - * A builder to create an instance of {@link TransportClient} - * This class pre-installs the + * A builder to create an instance of {@link TransportClient}. This class pre-installs the * {@link Netty4Plugin}, * {@link ReindexPlugin}, * {@link PercolatorPlugin}, * and {@link MustachePlugin} - * for the client. These plugins are all elasticsearch core modules required. + * plugins for the client. These plugins are all the required modules for Elasticsearch. */ @SuppressWarnings({"unchecked","varargs"}) public class PreBuiltTransportClient extends TransportClient { - private static final Collection> PRE_INSTALLED_PLUGINS = - Collections.unmodifiableList( - Arrays.asList( - Netty4Plugin.class, - ReindexPlugin.class, - PercolatorPlugin.class, - MustachePlugin.class)); + static { + // initialize Netty system properties before triggering any Netty class loads + initializeNetty(); + } + /** + * Netty wants to do some unsafe things like use unsafe and replace a private field. This method disables these things by default, but + * can be overridden by setting the corresponding system properties. 
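Because the initializer described above (its body follows just below) only sets each property when it is still unset, a user can opt back in to Netty's defaults before `PreBuiltTransportClient` is loaded, either on the command line or programmatically. A sketch; whether to re-enable these optimizations is the caller's choice:

---------------------------------------------------------------------------
// equivalent to -Dio.netty.noUnsafe=false -Dio.netty.noKeySetOptimization=false,
// run before the PreBuiltTransportClient class is first loaded
System.setProperty("io.netty.noUnsafe", "false");
System.setProperty("io.netty.noKeySetOptimization", "false");
---------------------------------------------------------------------------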
+ */ + @SuppressForbidden(reason = "set system properties to configure Netty") + private static void initializeNetty() { + final String noUnsafeKey = "io.netty.noUnsafe"; + final String noUnsafe = System.getProperty(noUnsafeKey); + if (noUnsafe == null) { + // disable Netty from using unsafe + // while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or + // the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here + System.setProperty(noUnsafeKey, Boolean.toString(true)); + } + + final String noKeySetOptimizationKey = "io.netty.noKeySetOptimization"; + final String noKeySetOptimization = System.getProperty(noKeySetOptimizationKey); + if (noKeySetOptimization == null) { + // disable Netty from replacing the selector key set + // while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or + // the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here + System.setProperty(noKeySetOptimizationKey, Boolean.toString(true)); + } + } + + private static final Collection> PRE_INSTALLED_PLUGINS = + Collections.unmodifiableList( + Arrays.asList( + Netty4Plugin.class, + ReindexPlugin.class, + PercolatorPlugin.class, + MustachePlugin.class)); /** * Creates a new transport client with pre-installed plugins. + * * @param settings the settings passed to this transport client - * @param plugins an optional array of additional plugins to run with this client + * @param plugins an optional array of additional plugins to run with this client */ @SafeVarargs public PreBuiltTransportClient(Settings settings, Class... plugins) { this(settings, Arrays.asList(plugins)); } - /** * Creates a new transport client with pre-installed plugins. + * * @param settings the settings passed to this transport client - * @param plugins a collection of additional plugins to run with this client + * @param plugins a collection of additional plugins to run with this client */ public PreBuiltTransportClient(Settings settings, Collection> plugins) { this(settings, plugins, null); @@ -78,12 +108,15 @@ public class PreBuiltTransportClient extends TransportClient { /** * Creates a new transport client with pre-installed plugins. - * @param settings the settings passed to this transport client - * @param plugins a collection of additional plugins to run with this client - * @param hostFailureListener a failure listener that is invoked if a node is disconnected. 
This can be null + * + * @param settings the settings passed to this transport client + * @param plugins a collection of additional plugins to run with this client + * @param hostFailureListener a failure listener that is invoked if a node is disconnected; this can be null */ - public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins, - HostFailureListener hostFailureListener) { + public PreBuiltTransportClient( + Settings settings, + Collection<Class<? extends Plugin>> plugins, + HostFailureListener hostFailureListener) { + super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS), hostFailureListener); } diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index a1d95b68af7..161b1d7e89c 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -41,8 +41,6 @@ public class PreBuiltTransportClientTests extends RandomizedTest { @Test public void testPluginInstalled() { - // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778 - assumeFalse(Constants.JRE_IS_MINIMUM_JAVA9); try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) { Settings settings = client.settings(); assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); diff --git a/core/build.gradle b/core/build.gradle index 7a580335571..6e0b94dd6f9 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -94,6 +94,8 @@ dependencies { exclude group: 'org.elasticsearch', module: 'elasticsearch' } } + testCompile 'com.google.jimfs:jimfs:1.1' + testCompile 'com.google.guava:guava:18.0' } if (isEclipse) { diff --git a/core/licenses/jackson-core-2.8.1.jar.sha1 b/core/licenses/jackson-core-2.8.1.jar.sha1 deleted file mode 100644 index b92131d6fab..00000000000 --- a/core/licenses/jackson-core-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd13b1c033741d48291315c6370f7d475a42dccf \ No newline at end of file diff --git a/core/licenses/jackson-core-2.8.6.jar.sha1 b/core/licenses/jackson-core-2.8.6.jar.sha1 new file mode 100644 index 00000000000..af7677d13c2 --- /dev/null +++ b/core/licenses/jackson-core-2.8.6.jar.sha1 @@ -0,0 +1 @@ +2ef7b1cc34de149600f5e75bc2d5bf40de894e60 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 deleted file mode 100644 index 7f1609bfd85..00000000000 --- a/core/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6fb7e75c9972559a78cf5cfc5a48a41a13ea40 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 new file mode 100644 index 00000000000..6a2e9802353 --- /dev/null +++ b/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 @@ -0,0 +1 @@ +b88721371cfa2d7242bb5e52fe70861aa061c050 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 deleted file mode 100644 index 114d656a388..00000000000 --- a/core/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -005b73867bc12224946fc67fc8d49d9f5e698d7f \ No newline at end of file diff --git
a/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 new file mode 100644 index 00000000000..19be9a2040b --- /dev/null +++ b/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 @@ -0,0 +1 @@ +71590ad45cee21249774e2f93e5eca66e446cef3 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 deleted file mode 100644 index 32ce0f74344..00000000000 --- a/core/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb63166c723b0b4b9fb5298fca232a2f6612ec34 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 new file mode 100644 index 00000000000..c61dad3bbcd --- /dev/null +++ b/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 @@ -0,0 +1 @@ +8bd44d50f9a6cdff9c7578ea39d524eb519e35ab \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 5cab7b2fef1..00000000000 --- a/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -770114e0188dd8b4f30e5878b4f6c8677cecf1be \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-6.4.0.jar.sha1 b/core/licenses/lucene-analyzers-common-6.4.0.jar.sha1 new file mode 100644 index 00000000000..f0934e93d51 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-6.4.0.jar.sha1 @@ -0,0 +1 @@ +e0feb9281a7da7a7df62398ab0fc655d51f68fed \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 02677cb1ff8..00000000000 --- a/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4eb0257e8419beaa9f84da6a51375fda4e491f2 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.4.0.jar.sha1 b/core/licenses/lucene-backward-codecs-6.4.0.jar.sha1 new file mode 100644 index 00000000000..8ba9e440224 --- /dev/null +++ b/core/licenses/lucene-backward-codecs-6.4.0.jar.sha1 @@ -0,0 +1 @@ +14698ecbca1437615ee31d412d0edd3440b4fccf \ No newline at end of file diff --git a/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index ea81fbaeb56..00000000000 --- a/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c80ad16cd36c41012abb8a8bb1c7328c6d680b4a \ No newline at end of file diff --git a/core/licenses/lucene-core-6.4.0.jar.sha1 b/core/licenses/lucene-core-6.4.0.jar.sha1 new file mode 100644 index 00000000000..e8e74ddc7a3 --- /dev/null +++ b/core/licenses/lucene-core-6.4.0.jar.sha1 @@ -0,0 +1 @@ +09dd516b847dcaf8da4e9096bf3189b0b3607aef \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index d4442ded938..00000000000 --- a/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -070d4e370f4fe0b8a04b2bce5b4381201b0c783f \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.4.0.jar.sha1 b/core/licenses/lucene-grouping-6.4.0.jar.sha1 new file mode 100644 index 
00000000000..9a54629dcd4 --- /dev/null +++ b/core/licenses/lucene-grouping-6.4.0.jar.sha1 @@ -0,0 +1 @@ +68a8f986a0076ad784cbb20813b9465b94e4c846 \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index e6fc043a287..00000000000 --- a/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -131d9a86f5943675493a85def0e692842f396458 \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.4.0.jar.sha1 b/core/licenses/lucene-highlighter-6.4.0.jar.sha1 new file mode 100644 index 00000000000..fd11e9112fb --- /dev/null +++ b/core/licenses/lucene-highlighter-6.4.0.jar.sha1 @@ -0,0 +1 @@ +6d921c1242b608a4dcd0784e32bcd94097ad93cd \ No newline at end of file diff --git a/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 6c90673f498..00000000000 --- a/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -385b2202036b50a764e4d2b032e21496b74a1c8e \ No newline at end of file diff --git a/core/licenses/lucene-join-6.4.0.jar.sha1 b/core/licenses/lucene-join-6.4.0.jar.sha1 new file mode 100644 index 00000000000..171a8dfe26c --- /dev/null +++ b/core/licenses/lucene-join-6.4.0.jar.sha1 @@ -0,0 +1 @@ +74d3cdf1bc863e3836b06f1865c970127cc15f26 \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index bdb3a168612..00000000000 --- a/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8742a44ef4849a17d5e59ef36e9a52a8f2370c2 \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.4.0.jar.sha1 b/core/licenses/lucene-memory-6.4.0.jar.sha1 new file mode 100644 index 00000000000..2ffd52a6f39 --- /dev/null +++ b/core/licenses/lucene-memory-6.4.0.jar.sha1 @@ -0,0 +1 @@ +dd13729c0b401e3df11bce0c343d1e00f07b9a19 \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index e29fc5f139c..00000000000 --- a/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ce2e4948fb66393a34f4200a6131cfde43e47bd \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.4.0.jar.sha1 b/core/licenses/lucene-misc-6.4.0.jar.sha1 new file mode 100644 index 00000000000..69c41bc81b0 --- /dev/null +++ b/core/licenses/lucene-misc-6.4.0.jar.sha1 @@ -0,0 +1 @@ +ce27abe3490bb8ccbebd2eefcb68f42a609ca986 \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 4998ff5b2e4..00000000000 --- a/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c1c385a597ce797b0049d9b2281b09593e1488a \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.4.0.jar.sha1 b/core/licenses/lucene-queries-6.4.0.jar.sha1 new file mode 100644 index 00000000000..1ce80d5ba80 --- /dev/null +++ b/core/licenses/lucene-queries-6.4.0.jar.sha1 @@ -0,0 +1 @@ +bd1978e3fdac2fadf1068828b0b1b534a56873c3 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 
b/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 9ba51f22f25..00000000000 --- a/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fafaa22906c067e6894f9f2b18ad03ded98e2f38 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.4.0.jar.sha1 b/core/licenses/lucene-queryparser-6.4.0.jar.sha1 new file mode 100644 index 00000000000..155c688840d --- /dev/null +++ b/core/licenses/lucene-queryparser-6.4.0.jar.sha1 @@ -0,0 +1 @@ +fb8fe41948fccf13b5dbb5d50441cac974544ade \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index cce2045942b..00000000000 --- a/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19c64a84617f42bb4c11b1e266df4009cd37fdd0 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.4.0.jar.sha1 b/core/licenses/lucene-sandbox-6.4.0.jar.sha1 new file mode 100644 index 00000000000..7df93d742c3 --- /dev/null +++ b/core/licenses/lucene-sandbox-6.4.0.jar.sha1 @@ -0,0 +1 @@ +e7f7d1ad298c4af264199d9199f34f2e4d9ca2b5 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 8169bea2fae..00000000000 --- a/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc8613fb61c0ae95dd3680b0f65e3380c3fd0d6c \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.4.0.jar.sha1 b/core/licenses/lucene-spatial-6.4.0.jar.sha1 new file mode 100644 index 00000000000..78dd7de93d0 --- /dev/null +++ b/core/licenses/lucene-spatial-6.4.0.jar.sha1 @@ -0,0 +1 @@ +5d4b3ce4df83d0509e0b5f7eecda72af458ba225 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 2614704c057..00000000000 --- a/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fa2c3e722294e863f3c70a15e97a18397391fb4 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.4.0.jar.sha1 b/core/licenses/lucene-spatial-extras-6.4.0.jar.sha1 new file mode 100644 index 00000000000..016fd2b6ddd --- /dev/null +++ b/core/licenses/lucene-spatial-extras-6.4.0.jar.sha1 @@ -0,0 +1 @@ +3b486b51d3aede074ab6de890b427379d40c0438 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 9b1c45581a1..00000000000 --- a/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db74c6313965ffdd10d9b19be2eed4ae2c76d2e3 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.4.0.jar.sha1 b/core/licenses/lucene-spatial3d-6.4.0.jar.sha1 new file mode 100644 index 00000000000..3d7cd4e751f --- /dev/null +++ b/core/licenses/lucene-spatial3d-6.4.0.jar.sha1 @@ -0,0 +1 @@ +344097014aeaaa0f94a217f3697e14ceee06581f \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 91841f474ef..00000000000 --- 
a/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b85ae1121b5fd56df985615a3cdd7b3879e9b92d \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.4.0.jar.sha1 b/core/licenses/lucene-suggest-6.4.0.jar.sha1 new file mode 100644 index 00000000000..2cfe897d139 --- /dev/null +++ b/core/licenses/lucene-suggest-6.4.0.jar.sha1 @@ -0,0 +1 @@ +6c4706b86718f2653120e0dbfd24e03248dd2ea7 \ No newline at end of file diff --git a/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java b/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java deleted file mode 100644 index 3d806588eca..00000000000 --- a/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.analysis.synonym; - -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.IntsRef; -import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.FiniteStringsIterator; -import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.Transition; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Creates a list of {@link TokenStream} where each stream is the tokens that make up a finite string in graph token stream. To do this, - * the graph token stream is converted to an {@link Automaton} and from there we use a {@link FiniteStringsIterator} to collect the various - * token streams for each finite string. 
- */ -public class GraphTokenStreamFiniteStrings { - private final Automaton.Builder builder; - Automaton det; - private final Map termToID = new HashMap<>(); - private final Map idToTerm = new HashMap<>(); - private int anyTermID = -1; - - public GraphTokenStreamFiniteStrings() { - this.builder = new Automaton.Builder(); - } - - private static class BytesRefArrayTokenStream extends TokenStream { - private final BytesTermAttribute termAtt = addAttribute(BytesTermAttribute.class); - private final BytesRef[] terms; - private int offset; - - BytesRefArrayTokenStream(BytesRef[] terms) { - this.terms = terms; - offset = 0; - } - - @Override - public boolean incrementToken() throws IOException { - if (offset < terms.length) { - clearAttributes(); - termAtt.setBytesRef(terms[offset]); - offset = offset + 1; - return true; - } - - return false; - } - } - - /** - * Gets - */ - public List getTokenStreams(final TokenStream in) throws IOException { - // build automation - build(in); - - List tokenStreams = new ArrayList<>(); - final FiniteStringsIterator finiteStrings = new FiniteStringsIterator(det); - for (IntsRef string; (string = finiteStrings.next()) != null; ) { - final BytesRef[] tokens = new BytesRef[string.length]; - for (int idx = string.offset, len = string.offset + string.length; idx < len; idx++) { - tokens[idx - string.offset] = idToTerm.get(string.ints[idx]); - } - - tokenStreams.add(new BytesRefArrayTokenStream(tokens)); - } - - return tokenStreams; - } - - private void build(final TokenStream in) throws IOException { - if (det != null) { - throw new IllegalStateException("Automation already built"); - } - - final TermToBytesRefAttribute termBytesAtt = in.addAttribute(TermToBytesRefAttribute.class); - final PositionIncrementAttribute posIncAtt = in.addAttribute(PositionIncrementAttribute.class); - final PositionLengthAttribute posLengthAtt = in.addAttribute(PositionLengthAttribute.class); - final OffsetAttribute offsetAtt = in.addAttribute(OffsetAttribute.class); - - in.reset(); - - int pos = -1; - int lastPos = 0; - int maxOffset = 0; - int maxPos = -1; - int state = -1; - while (in.incrementToken()) { - int posInc = posIncAtt.getPositionIncrement(); - assert pos > -1 || posInc > 0; - - if (posInc > 1) { - throw new IllegalArgumentException("cannot handle holes; to accept any term, use '*' term"); - } - - if (posInc > 0) { - // New node: - pos += posInc; - } - - int endPos = pos + posLengthAtt.getPositionLength(); - while (state < endPos) { - state = createState(); - } - - BytesRef term = termBytesAtt.getBytesRef(); - //System.out.println(pos + "-" + endPos + ": " + term.utf8ToString() + ": posInc=" + posInc); - if (term.length == 1 && term.bytes[term.offset] == (byte) '*') { - addAnyTransition(pos, endPos); - } else { - addTransition(pos, endPos, term); - } - - maxOffset = Math.max(maxOffset, offsetAtt.endOffset()); - maxPos = Math.max(maxPos, endPos); - } - - in.end(); - - // TODO: look at endOffset? ts2a did... - - // TODO: this (setting "last" state as the only accept state) may be too simplistic? - setAccept(state, true); - finish(); - } - - /** - * Returns a new state; state 0 is always the initial state. - */ - private int createState() { - return builder.createState(); - } - - /** - * Marks the specified state as accept or not. - */ - private void setAccept(int state, boolean accept) { - builder.setAccept(state, accept); - } - - /** - * Adds a transition to the automaton. 
- */ - private void addTransition(int source, int dest, String term) { - addTransition(source, dest, new BytesRef(term)); - } - - /** - * Adds a transition to the automaton. - */ - private void addTransition(int source, int dest, BytesRef term) { - if (term == null) { - throw new NullPointerException("term should not be null"); - } - builder.addTransition(source, dest, getTermID(term)); - } - - /** - * Adds a transition matching any term. - */ - private void addAnyTransition(int source, int dest) { - builder.addTransition(source, dest, getTermID(null)); - } - - /** - * Call this once you are done adding states/transitions. - */ - private void finish() { - finish(DEFAULT_MAX_DETERMINIZED_STATES); - } - - /** - * Call this once you are done adding states/transitions. - * - * @param maxDeterminizedStates Maximum number of states created when determinizing the automaton. Higher numbers allow this operation - * to consume more memory but allow more complex automatons. - */ - private void finish(int maxDeterminizedStates) { - Automaton automaton = builder.finish(); - - // System.out.println("before det:\n" + automaton.toDot()); - - Transition t = new Transition(); - - // TODO: should we add "eps back to initial node" for all states, - // and det that? then we don't need to revisit initial node at - // every position? but automaton could blow up? And, this makes it - // harder to skip useless positions at search time? - - if (anyTermID != -1) { - - // Make sure there are no leading or trailing ANY: - int count = automaton.initTransition(0, t); - for (int i = 0; i < count; i++) { - automaton.getNextTransition(t); - if (anyTermID >= t.min && anyTermID <= t.max) { - throw new IllegalStateException("automaton cannot lead with an ANY transition"); - } - } - - int numStates = automaton.getNumStates(); - for (int i = 0; i < numStates; i++) { - count = automaton.initTransition(i, t); - for (int j = 0; j < count; j++) { - automaton.getNextTransition(t); - if (automaton.isAccept(t.dest) && anyTermID >= t.min && anyTermID <= t.max) { - throw new IllegalStateException("automaton cannot end with an ANY transition"); - } - } - } - - int termCount = termToID.size(); - - // We have to carefully translate these transitions so automaton - // realizes they also match all other terms: - Automaton newAutomaton = new Automaton(); - for (int i = 0; i < numStates; i++) { - newAutomaton.createState(); - newAutomaton.setAccept(i, automaton.isAccept(i)); - } - - for (int i = 0; i < numStates; i++) { - count = automaton.initTransition(i, t); - for (int j = 0; j < count; j++) { - automaton.getNextTransition(t); - int min, max; - if (t.min <= anyTermID && anyTermID <= t.max) { - // Match any term - min = 0; - max = termCount - 1; - } else { - min = t.min; - max = t.max; - } - newAutomaton.addTransition(t.source, t.dest, min, max); - } - } - newAutomaton.finishState(); - automaton = newAutomaton; - } - - det = Operations.removeDeadStates(Operations.determinize(automaton, maxDeterminizedStates)); - } - - private int getTermID(BytesRef term) { - Integer id = termToID.get(term); - if (id == null) { - id = termToID.size(); - if (term != null) { - term = BytesRef.deepCopyOf(term); - } - termToID.put(term, id); - idToTerm.put(id, term); - if (term == null) { - anyTermID = id; - } - } - - return id; - } -} diff --git a/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java b/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java deleted file mode 100644 index f2c27679ab6..00000000000 --- 
a/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java +++ /dev/null @@ -1,588 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.analysis.synonym; - -import org.apache.lucene.analysis.TokenFilter; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.util.AttributeSource; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CharsRefBuilder; -import org.apache.lucene.util.RollingBuffer; -import org.apache.lucene.util.fst.FST; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -// TODO: maybe we should resolve token -> wordID then run -// FST on wordIDs, for better perf? - -// TODO: a more efficient approach would be Aho/Corasick's -// algorithm -// http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm -// It improves over the current approach here -// because it does not fully re-start matching at every -// token. For example if one pattern is "a b c x" -// and another is "b c d" and the input is "a b c d", on -// trying to parse "a b c x" but failing when you got to x, -// rather than starting over again your really should -// immediately recognize that "b c d" matches at the next -// input. I suspect this won't matter that much in -// practice, but it's possible on some set of synonyms it -// will. We'd have to modify Aho/Corasick to enforce our -// conflict resolving (eg greedy matching) because that algo -// finds all matches. This really amounts to adding a .* -// closure to the FST and then determinizing it. -// -// Another possible solution is described at http://www.cis.uni-muenchen.de/people/Schulz/Pub/dictle5.ps - -/** - * Applies single- or multi-token synonyms from a {@link SynonymMap} - * to an incoming {@link TokenStream}, producing a fully correct graph - * output. This is a replacement for {@link SynonymFilter}, which produces - * incorrect graphs for multi-token synonyms. - * - * NOTE: this cannot consume an incoming graph; results will - * be undefined. 
- */ -public final class SynonymGraphFilter extends TokenFilter { - - public static final String TYPE_SYNONYM = "SYNONYM"; - public static final int GRAPH_FLAG = 8; - - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); - private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); - private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class); - - private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); - private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - - private final SynonymMap synonyms; - private final boolean ignoreCase; - - private final FST fst; - - private final FST.BytesReader fstReader; - private final FST.Arc scratchArc; - private final ByteArrayDataInput bytesReader = new ByteArrayDataInput(); - private final BytesRef scratchBytes = new BytesRef(); - private final CharsRefBuilder scratchChars = new CharsRefBuilder(); - private final LinkedList outputBuffer = new LinkedList<>(); - - private int nextNodeOut; - private int lastNodeOut; - private int maxLookaheadUsed; - - // For testing: - private int captureCount; - - private boolean liveToken; - - // Start/end offset of the current match: - private int matchStartOffset; - private int matchEndOffset; - - // True once the input TokenStream is exhausted: - private boolean finished; - - private int lookaheadNextRead; - private int lookaheadNextWrite; - - private RollingBuffer lookahead = new RollingBuffer() { - @Override - protected BufferedInputToken newInstance() { - return new BufferedInputToken(); - } - }; - - static class BufferedInputToken implements RollingBuffer.Resettable { - final CharsRefBuilder term = new CharsRefBuilder(); - AttributeSource.State state; - int startOffset = -1; - int endOffset = -1; - - @Override - public void reset() { - state = null; - term.clear(); - - // Intentionally invalid to ferret out bugs: - startOffset = -1; - endOffset = -1; - } - } - - static class BufferedOutputToken { - final String term; - - // Non-null if this was an incoming token: - final State state; - - final int startNode; - final int endNode; - - public BufferedOutputToken(State state, String term, int startNode, int endNode) { - this.state = state; - this.term = term; - this.startNode = startNode; - this.endNode = endNode; - } - } - - public SynonymGraphFilter(TokenStream input, SynonymMap synonyms, boolean ignoreCase) { - super(input); - this.synonyms = synonyms; - this.fst = synonyms.fst; - if (fst == null) { - throw new IllegalArgumentException("fst must be non-null"); - } - this.fstReader = fst.getBytesReader(); - scratchArc = new FST.Arc<>(); - this.ignoreCase = ignoreCase; - } - - @Override - public boolean incrementToken() throws IOException { - //System.out.println("\nS: incrToken lastNodeOut=" + lastNodeOut + " nextNodeOut=" + nextNodeOut); - - assert lastNodeOut <= nextNodeOut; - - if (outputBuffer.isEmpty() == false) { - // We still have pending outputs from a prior synonym match: - releaseBufferedToken(); - //System.out.println(" syn: ret buffered=" + this); - assert liveToken == false; - return true; - } - - // Try to parse a new synonym match at the current token: - - if (parse()) { - // A new match was found: - releaseBufferedToken(); - //System.out.println(" syn: after parse, ret buffered=" + this); - assert liveToken == false; - return true; - } - - if (lookaheadNextRead == 
lookaheadNextWrite) { - - // Fast path: parse pulled one token, but it didn't match - // the start for any synonym, so we now return it "live" w/o having - // cloned all of its atts: - if (finished) { - //System.out.println(" syn: ret END"); - return false; - } - - assert liveToken; - liveToken = false; - - // NOTE: no need to change posInc since it's relative, i.e. whatever - // node our output is upto will just increase by the incoming posInc. - // We also don't need to change posLen, but only because we cannot - // consume a graph, so the incoming token can never span a future - // synonym match. - - } else { - // We still have buffered lookahead tokens from a previous - // parse attempt that required lookahead; just replay them now: - //System.out.println(" restore buffer"); - assert lookaheadNextRead < lookaheadNextWrite : "read=" + lookaheadNextRead + " write=" + lookaheadNextWrite; - BufferedInputToken token = lookahead.get(lookaheadNextRead); - lookaheadNextRead++; - - restoreState(token.state); - - lookahead.freeBefore(lookaheadNextRead); - - //System.out.println(" after restore offset=" + offsetAtt.startOffset() + "-" + offsetAtt.endOffset()); - assert liveToken == false; - } - - lastNodeOut += posIncrAtt.getPositionIncrement(); - nextNodeOut = lastNodeOut + posLenAtt.getPositionLength(); - - //System.out.println(" syn: ret lookahead=" + this); - - return true; - } - - private void releaseBufferedToken() throws IOException { - //System.out.println(" releaseBufferedToken"); - - BufferedOutputToken token = outputBuffer.pollFirst(); - - if (token.state != null) { - // This is an original input token (keepOrig=true case): - //System.out.println(" hasState"); - restoreState(token.state); - //System.out.println(" startOffset=" + offsetAtt.startOffset() + " endOffset=" + offsetAtt.endOffset()); - } else { - clearAttributes(); - //System.out.println(" no state"); - termAtt.append(token.term); - - // We better have a match already: - assert matchStartOffset != -1; - - offsetAtt.setOffset(matchStartOffset, matchEndOffset); - //System.out.println(" startOffset=" + matchStartOffset + " endOffset=" + matchEndOffset); - typeAtt.setType(TYPE_SYNONYM); - } - - //System.out.println(" lastNodeOut=" + lastNodeOut); - //System.out.println(" term=" + termAtt); - - posIncrAtt.setPositionIncrement(token.startNode - lastNodeOut); - lastNodeOut = token.startNode; - posLenAtt.setPositionLength(token.endNode - token.startNode); - flagsAtt.setFlags(flagsAtt.getFlags() | GRAPH_FLAG); // set the graph flag - } - - /** - * Scans the next input token(s) to see if a synonym matches. Returns true - * if a match was found. 
- */ - private boolean parse() throws IOException { - // System.out.println(Thread.currentThread().getName() + ": S: parse: " + System.identityHashCode(this)); - - // Holds the longest match we've seen so far: - BytesRef matchOutput = null; - int matchInputLength = 0; - - BytesRef pendingOutput = fst.outputs.getNoOutput(); - fst.getFirstArc(scratchArc); - - assert scratchArc.output == fst.outputs.getNoOutput(); - - // How many tokens in the current match - int matchLength = 0; - boolean doFinalCapture = false; - - int lookaheadUpto = lookaheadNextRead; - matchStartOffset = -1; - - byToken: - while (true) { - //System.out.println(" cycle lookaheadUpto=" + lookaheadUpto + " maxPos=" + lookahead.getMaxPos()); - - // Pull next token's chars: - final char[] buffer; - final int bufferLen; - final int inputEndOffset; - - if (lookaheadUpto <= lookahead.getMaxPos()) { - // Still in our lookahead buffer - BufferedInputToken token = lookahead.get(lookaheadUpto); - lookaheadUpto++; - buffer = token.term.chars(); - bufferLen = token.term.length(); - inputEndOffset = token.endOffset; - //System.out.println(" use buffer now max=" + lookahead.getMaxPos()); - if (matchStartOffset == -1) { - matchStartOffset = token.startOffset; - } - } else { - - // We used up our lookahead buffer of input tokens - // -- pull next real input token: - - assert finished || liveToken == false; - - if (finished) { - //System.out.println(" break: finished"); - break; - } else if (input.incrementToken()) { - //System.out.println(" input.incrToken"); - liveToken = true; - buffer = termAtt.buffer(); - bufferLen = termAtt.length(); - if (matchStartOffset == -1) { - matchStartOffset = offsetAtt.startOffset(); - } - inputEndOffset = offsetAtt.endOffset(); - - lookaheadUpto++; - } else { - // No more input tokens - finished = true; - //System.out.println(" break: now set finished"); - break; - } - } - - matchLength++; - //System.out.println(" cycle term=" + new String(buffer, 0, bufferLen)); - - // Run each char in this token through the FST: - int bufUpto = 0; - while (bufUpto < bufferLen) { - final int codePoint = Character.codePointAt(buffer, bufUpto, bufferLen); - if (fst.findTargetArc(ignoreCase ? Character.toLowerCase(codePoint) : codePoint, scratchArc, scratchArc, fstReader) == - null) { - break byToken; - } - - // Accum the output - pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output); - bufUpto += Character.charCount(codePoint); - } - - assert bufUpto == bufferLen; - - // OK, entire token matched; now see if this is a final - // state in the FST (a match): - if (scratchArc.isFinal()) { - matchOutput = fst.outputs.add(pendingOutput, scratchArc.nextFinalOutput); - matchInputLength = matchLength; - matchEndOffset = inputEndOffset; - //System.out.println(" ** match"); - } - - // See if the FST can continue matching (ie, needs to - // see the next input token): - if (fst.findTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null) { - // No further rules can match here; we're done - // searching for matching rules starting at the - // current input position. 
- break; - } else { - // More matching is possible -- accum the output (if - // any) of the WORD_SEP arc: - pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output); - doFinalCapture = true; - if (liveToken) { - capture(); - } - } - } - - if (doFinalCapture && liveToken && finished == false) { - // Must capture the final token if we captured any prior tokens: - capture(); - } - - if (matchOutput != null) { - - if (liveToken) { - // Single input token synonym; we must buffer it now: - capture(); - } - - // There is a match! - bufferOutputTokens(matchOutput, matchInputLength); - lookaheadNextRead += matchInputLength; - //System.out.println(" precmatch; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos()); - lookahead.freeBefore(lookaheadNextRead); - //System.out.println(" match; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos()); - return true; - } else { - //System.out.println(" no match; lookaheadNextRead=" + lookaheadNextRead); - return false; - } - - //System.out.println(" parse done inputSkipCount=" + inputSkipCount + " nextRead=" + nextRead + " nextWrite=" + nextWrite); - } - - /** - * Expands the output graph into the necessary tokens, adding - * synonyms as side paths parallel to the input tokens, and - * buffers them in the output token buffer. - */ - private void bufferOutputTokens(BytesRef bytes, int matchInputLength) { - bytesReader.reset(bytes.bytes, bytes.offset, bytes.length); - - final int code = bytesReader.readVInt(); - final boolean keepOrig = (code & 0x1) == 0; - //System.out.println(" buffer: keepOrig=" + keepOrig + " matchInputLength=" + matchInputLength); - - // How many nodes along all paths; we need this to assign the - // node ID for the final end node where all paths merge back: - int totalPathNodes; - if (keepOrig) { - assert matchInputLength > 0; - totalPathNodes = matchInputLength - 1; - } else { - totalPathNodes = 0; - } - - // How many synonyms we will insert over this match: - final int count = code >>> 1; - - // TODO: we could encode this instead into the FST: - - // 1st pass: count how many new nodes we need - List> paths = new ArrayList<>(); - for (int outputIDX = 0; outputIDX < count; outputIDX++) { - int wordID = bytesReader.readVInt(); - synonyms.words.get(wordID, scratchBytes); - scratchChars.copyUTF8Bytes(scratchBytes); - int lastStart = 0; - - List path = new ArrayList<>(); - paths.add(path); - int chEnd = scratchChars.length(); - for (int chUpto = 0; chUpto <= chEnd; chUpto++) { - if (chUpto == chEnd || scratchChars.charAt(chUpto) == SynonymMap.WORD_SEPARATOR) { - path.add(new String(scratchChars.chars(), lastStart, chUpto - lastStart)); - lastStart = 1 + chUpto; - } - } - - assert path.size() > 0; - totalPathNodes += path.size() - 1; - } - //System.out.println(" totalPathNodes=" + totalPathNodes); - - // 2nd pass: buffer tokens for the graph fragment - - // NOTE: totalPathNodes will be 0 in the case where the matched - // input is a single token and all outputs are also a single token - - // We "spawn" a side-path for each of the outputs for this matched - // synonym, all ending back at this end node: - - int startNode = nextNodeOut; - - int endNode = startNode + totalPathNodes + 1; - //System.out.println(" " + paths.size() + " new side-paths"); - - // First, fanout all tokens departing start node for these new side paths: - int newNodeCount = 0; - for (List path : paths) { - int pathEndNode; - //System.out.println(" path size=" + path.size()); - if (path.size() == 1) { - // 
Single token output, so there are no intermediate nodes: - pathEndNode = endNode; - } else { - pathEndNode = nextNodeOut + newNodeCount + 1; - newNodeCount += path.size() - 1; - } - outputBuffer.add(new BufferedOutputToken(null, path.get(0), startNode, pathEndNode)); - } - - // We must do the original tokens last, else the offsets "go backwards": - if (keepOrig) { - BufferedInputToken token = lookahead.get(lookaheadNextRead); - int inputEndNode; - if (matchInputLength == 1) { - // Single token matched input, so there are no intermediate nodes: - inputEndNode = endNode; - } else { - inputEndNode = nextNodeOut + newNodeCount + 1; - } - - //System.out.println(" keepOrig first token: " + token.term); - - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), startNode, inputEndNode)); - } - - nextNodeOut = endNode; - - // Do full side-path for each syn output: - for (int pathID = 0; pathID < paths.size(); pathID++) { - List path = paths.get(pathID); - if (path.size() > 1) { - int lastNode = outputBuffer.get(pathID).endNode; - for (int i = 1; i < path.size() - 1; i++) { - outputBuffer.add(new BufferedOutputToken(null, path.get(i), lastNode, lastNode + 1)); - lastNode++; - } - outputBuffer.add(new BufferedOutputToken(null, path.get(path.size() - 1), lastNode, endNode)); - } - } - - if (keepOrig && matchInputLength > 1) { - // Do full "side path" with the original tokens: - int lastNode = outputBuffer.get(paths.size()).endNode; - for (int i = 1; i < matchInputLength - 1; i++) { - BufferedInputToken token = lookahead.get(lookaheadNextRead + i); - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, lastNode + 1)); - lastNode++; - } - BufferedInputToken token = lookahead.get(lookaheadNextRead + matchInputLength - 1); - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, endNode)); - } - - /* - System.out.println(" after buffer: " + outputBuffer.size() + " tokens:"); - for(BufferedOutputToken token : outputBuffer) { - System.out.println(" tok: " + token.term + " startNode=" + token.startNode + " endNode=" + token.endNode); - } - */ - } - - /** - * Buffers the current input token into lookahead buffer. 
- */ - private void capture() { - assert liveToken; - liveToken = false; - BufferedInputToken token = lookahead.get(lookaheadNextWrite); - lookaheadNextWrite++; - - token.state = captureState(); - token.startOffset = offsetAtt.startOffset(); - token.endOffset = offsetAtt.endOffset(); - assert token.term.length() == 0; - token.term.append(termAtt); - - captureCount++; - maxLookaheadUsed = Math.max(maxLookaheadUsed, lookahead.getBufferSize()); - //System.out.println(" maxLookaheadUsed=" + maxLookaheadUsed); - } - - @Override - public void reset() throws IOException { - super.reset(); - lookahead.reset(); - lookaheadNextWrite = 0; - lookaheadNextRead = 0; - captureCount = 0; - lastNodeOut = -1; - nextNodeOut = 0; - matchStartOffset = -1; - matchEndOffset = -1; - finished = false; - liveToken = false; - outputBuffer.clear(); - maxLookaheadUsed = 0; - //System.out.println("S: reset"); - } - - // for testing - int getCaptureCount() { - return captureCount; - } - - // for testing - int getMaxLookaheadUsed() { - return maxLookaheadUsed; - } -} diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index 976c4706725..c1998c65000 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -19,6 +19,9 @@ package org.apache.lucene.queryparser.classic; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -30,6 +33,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.GraphQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; @@ -55,9 +59,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded; - /** * A query parser that uses the {@link MapperService} in order to build smarter * queries based on the mapping information. 
@@ -739,27 +740,48 @@ public class MapperQueryParser extends AnalyzingQueryParser { private Query applySlop(Query q, int slop) { if (q instanceof PhraseQuery) { - PhraseQuery pq = (PhraseQuery) q; - PhraseQuery.Builder builder = new PhraseQuery.Builder(); - builder.setSlop(slop); - final Term[] terms = pq.getTerms(); - final int[] positions = pq.getPositions(); - for (int i = 0; i < terms.length; ++i) { - builder.add(terms[i], positions[i]); - } - pq = builder.build(); //make sure that the boost hasn't been set beforehand, otherwise we'd lose it assert q instanceof BoostQuery == false; - return pq; + return addSlopToPhrase((PhraseQuery) q, slop); } else if (q instanceof MultiPhraseQuery) { MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q); builder.setSlop(slop); return builder.build(); + } else if (q instanceof GraphQuery && ((GraphQuery) q).hasPhrase()) { + // we have a graph query that has at least one phrase sub-query + // re-build and set slop on all phrase queries + List<Query> oldQueries = ((GraphQuery) q).getQueries(); + Query[] queries = new Query[oldQueries.size()]; + for (int i = 0; i < queries.length; i++) { + Query oldQuery = oldQueries.get(i); + if (oldQuery instanceof PhraseQuery) { + queries[i] = addSlopToPhrase((PhraseQuery) oldQuery, slop); + } else { + queries[i] = oldQuery; + } + } + + return new GraphQuery(queries); } else { return q; } } + /** + * Rebuilds a phrase query with the given slop value. + */ + private PhraseQuery addSlopToPhrase(PhraseQuery query, int slop) { + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + builder.setSlop(slop); + final Term[] terms = query.getTerms(); + final int[] positions = query.getPositions(); + for (int i = 0; i < terms.length; ++i) { + builder.add(terms[i], positions[i]); + } + + return builder.build(); + } + private Collection<String> extractMultiFields(String field) { Collection<String> fields; if (field != null) { diff --git a/core/src/main/java/org/apache/lucene/search/GraphQuery.java b/core/src/main/java/org/apache/lucene/search/GraphQuery.java deleted file mode 100644 index cad316d701c..00000000000 --- a/core/src/main/java/org/apache/lucene/search/GraphQuery.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.search; - -import org.apache.lucene.index.IndexReader; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -/** - * A query that wraps multiple sub-queries generated from a graph token stream. - */ -public final class GraphQuery extends Query { - private final Query[] queries; - private final boolean hasBoolean; - - /** - * Constructor sets the queries and checks if any of them are - * a boolean query.
- * - * @param queries the non-null array of queries - */ - public GraphQuery(Query... queries) { - this.queries = Objects.requireNonNull(queries).clone(); - for (Query query : queries) { - if (query instanceof BooleanQuery) { - hasBoolean = true; - return; - } - } - hasBoolean = false; - } - - /** - * Gets the queries - * - * @return unmodifiable list of Query - */ - public List getQueries() { - return Collections.unmodifiableList(Arrays.asList(queries)); - } - - /** - * If there is at least one boolean query or not. - * - * @return true if there is a boolean, false if not - */ - public boolean hasBoolean() { - return hasBoolean; - } - - /** - * Rewrites to a single query or a boolean query where each query is a SHOULD clause. - */ - @Override - public Query rewrite(IndexReader reader) throws IOException { - if (queries.length == 0) { - return new BooleanQuery.Builder().build(); - } - - if (queries.length == 1) { - return queries[0]; - } - - BooleanQuery.Builder q = new BooleanQuery.Builder(); - q.setDisableCoord(true); - for (Query clause : queries) { - q.add(clause, BooleanClause.Occur.SHOULD); - } - - return q.build(); - } - - @Override - public String toString(String field) { - StringBuilder builder = new StringBuilder("Graph("); - for (int i = 0; i < queries.length; i++) { - if (i != 0) { - builder.append(", "); - } - builder.append(Objects.toString(queries[i])); - } - builder.append(")"); - return builder.toString(); - } - - @Override - public boolean equals(Object other) { - return sameClassAs(other) && - Arrays.equals(queries, ((GraphQuery) other).queries); - } - - @Override - public int hashCode() { - return 31 * classHash() + Arrays.hashCode(queries); - } -} diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java new file mode 100644 index 00000000000..169a89edbcf --- /dev/null +++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -0,0 +1,215 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.lucene.search.grouping; + +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.util.PriorityQueue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Represents hits returned by {@link CollapsingTopDocsCollector#getTopDocs()}. 
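+ * <p> + * The {@code scoreDocs} array inherited from {@link TopFieldDocs} and the {@code collapseValues} array are parallel: entry {@code i} of {@code collapseValues} is the collapse value of hit {@code i}.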
+ */ +public class CollapseTopFieldDocs extends TopFieldDocs { + /** The field used for collapsing **/ + public final String field; + /** The collapse value for each top doc */ + public final Object[] collapseValues; + + public CollapseTopFieldDocs(String field, int totalHits, ScoreDoc[] scoreDocs, + SortField[] sortFields, Object[] values, float maxScore) { + super(totalHits, scoreDocs, sortFields, maxScore); + this.field = field; + this.collapseValues = values; + } + + // Refers to one hit: + private static class ShardRef { + // Which shard (index into shardHits[]): + final int shardIndex; + + // Which hit within the shard: + int hitIndex; + + ShardRef(int shardIndex) { + this.shardIndex = shardIndex; + } + + @Override + public String toString() { + return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")"; + } + }; + + private static class MergeSortQueue extends PriorityQueue<ShardRef> { + // These are really FieldDoc instances: + final ScoreDoc[][] shardHits; + final FieldComparator<?>[] comparators; + final int[] reverseMul; + + MergeSortQueue(Sort sort, CollapseTopFieldDocs[] shardHits) throws IOException { + super(shardHits.length); + this.shardHits = new ScoreDoc[shardHits.length][]; + for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) { + final ScoreDoc[] shard = shardHits[shardIDX].scoreDocs; + if (shard != null) { + this.shardHits[shardIDX] = shard; + // Fail gracefully if API is misused: + for (int hitIDX = 0; hitIDX < shard.length; hitIDX++) { + final ScoreDoc sd = shard[hitIDX]; + final FieldDoc gd = (FieldDoc) sd; + assert gd.fields != null; + } + } + } + + final SortField[] sortFields = sort.getSort(); + comparators = new FieldComparator[sortFields.length]; + reverseMul = new int[sortFields.length]; + for (int compIDX = 0; compIDX < sortFields.length; compIDX++) { + final SortField sortField = sortFields[compIDX]; + comparators[compIDX] = sortField.getComparator(1, compIDX); + reverseMul[compIDX] = sortField.getReverse() ? -1 : 1; + } + } + + // Returns true if first is < second + @Override + public boolean lessThan(ShardRef first, ShardRef second) { + assert first != second; + final FieldDoc firstFD = (FieldDoc) shardHits[first.shardIndex][first.hitIndex]; + final FieldDoc secondFD = (FieldDoc) shardHits[second.shardIndex][second.hitIndex]; + + for (int compIDX = 0; compIDX < comparators.length; compIDX++) { + final FieldComparator comp = comparators[compIDX]; + + final int cmp = + reverseMul[compIDX] * comp.compareValues(firstFD.fields[compIDX], secondFD.fields[compIDX]); + + if (cmp != 0) { + return cmp < 0; + } + } + + // Tie break: earlier shard wins + if (first.shardIndex < second.shardIndex) { + return true; + } else if (first.shardIndex > second.shardIndex) { + return false; + } else { + // Tie break in same shard: resolve however the + // shard had resolved it: + assert first.hitIndex != second.hitIndex; + return first.hitIndex < second.hitIndex; + } + } + } + + /** + * Returns a new CollapseTopDocs, containing topN collapsed results across + * the provided CollapseTopDocs, sorting by score. Each {@link CollapseTopFieldDocs} instance must be sorted.
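+ * <p> + * A minimal usage sketch (assuming one {@code CollapseTopFieldDocs} was collected per shard with the same sort and the same collapse field; {@code sort} and {@code shardHits} are placeholders): + * <pre> + * CollapseTopFieldDocs merged = CollapseTopFieldDocs.merge(sort, 0, 10, shardHits); + * </pre>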
+ **/ + public static CollapseTopFieldDocs merge(Sort sort, int start, int size, + CollapseTopFieldDocs[] shardHits) throws IOException { + String collapseField = shardHits[0].field; + for (int i = 1; i < shardHits.length; i++) { + if (collapseField.equals(shardHits[i].field) == false) { + throw new IllegalArgumentException("collapse field differ across shards [" + + collapseField + "] != [" + shardHits[i].field + "]"); + } + } + final PriorityQueue<ShardRef> queue = new MergeSortQueue(sort, shardHits); + + int totalHitCount = 0; + int availHitCount = 0; + float maxScore = Float.MIN_VALUE; + for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) { + final CollapseTopFieldDocs shard = shardHits[shardIDX]; + totalHitCount += shard.totalHits; + if (shard.scoreDocs != null && shard.scoreDocs.length > 0) { + availHitCount += shard.scoreDocs.length; + queue.add(new ShardRef(shardIDX)); + maxScore = Math.max(maxScore, shard.getMaxScore()); + } + } + + if (availHitCount == 0) { + maxScore = Float.NaN; + } + + final ScoreDoc[] hits; + final Object[] values; + if (availHitCount <= start) { + hits = new ScoreDoc[0]; + values = new Object[0]; + } else { + List<ScoreDoc> hitList = new ArrayList<>(); + List<Object> collapseList = new ArrayList<>(); + int requestedResultWindow = start + size; + int numIterOnHits = Math.min(availHitCount, requestedResultWindow); + int hitUpto = 0; + Set<Object> seen = new HashSet<>(); + while (hitUpto < numIterOnHits) { + if (queue.size() == 0) { + break; + } + ShardRef ref = queue.top(); + final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex]; + final Object collapseValue = shardHits[ref.shardIndex].collapseValues[ref.hitIndex++]; + if (seen.contains(collapseValue)) { + if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) { + queue.updateTop(); + } else { + queue.pop(); + } + continue; + } + seen.add(collapseValue); + hit.shardIndex = ref.shardIndex; + if (hitUpto >= start) { + hitList.add(hit); + collapseList.add(collapseValue); + } + + hitUpto++; + + if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) { + // Not done with these TopDocs yet: + queue.updateTop(); + } else { + queue.pop(); + } + } + hits = hitList.toArray(new ScoreDoc[0]); + values = collapseList.toArray(new Object[0]); + } + return new CollapseTopFieldDocs(collapseField, totalHitCount, hits, sort.getSort(), values, maxScore); + } +} diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java new file mode 100644 index 00000000000..5bc8afb347c --- /dev/null +++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java
new file mode 100644
index 00000000000..5bc8afb347c
--- /dev/null
+++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+/**
+ * Utility class that ensures that a single collapse key is extracted per document.
+ */
+abstract class CollapsingDocValuesSource<T> {
+    protected final String field;
+
+    CollapsingDocValuesSource(String field) throws IOException {
+        this.field = field;
+    }
+
+    abstract T get(int doc);
+
+    abstract T copy(T value, T reuse);
+
+    abstract void setNextReader(LeafReader reader) throws IOException;
+
+    /**
+     * Implementation for {@link NumericDocValues} and {@link SortedNumericDocValues}.
+     * Fails with an {@link IllegalStateException} if a document contains multiple values for the specified field.
+     */
+    static class Numeric extends CollapsingDocValuesSource<Long> {
+        private NumericDocValues values;
+        private Bits docsWithField;
+
+        Numeric(String field) throws IOException {
+            super(field);
+        }
+
+        @Override
+        public Long get(int doc) {
+            if (docsWithField.get(doc)) {
+                return values.get(doc);
+            } else {
+                return null;
+            }
+        }
+
+        @Override
+        public Long copy(Long value, Long reuse) {
+            return value;
+        }
+
+        @Override
+        public void setNextReader(LeafReader reader) throws IOException {
+            DocValuesType type = getDocValuesType(reader, field);
+            if (type == null || type == DocValuesType.NONE) {
+                values = DocValues.emptyNumeric();
+                docsWithField = new Bits.MatchNoBits(reader.maxDoc());
+                return;
+            }
+            docsWithField = DocValues.getDocsWithField(reader, field);
+            switch (type) {
+                case NUMERIC:
+                    values = DocValues.getNumeric(reader, field);
+                    break;
+
+                case SORTED_NUMERIC:
+                    final SortedNumericDocValues sorted = DocValues.getSortedNumeric(reader, field);
+                    values = DocValues.unwrapSingleton(sorted);
+                    if (values == null) {
+                        values = new NumericDocValues() {
+                            @Override
+                            public long get(int docID) {
+                                sorted.setDocument(docID);
+                                assert sorted.count() > 0;
+                                if (sorted.count() > 1) {
+                                    throw new IllegalStateException("failed to collapse " + docID +
+                                        ", the collapse field must be single valued");
+                                }
+                                return sorted.valueAt(0);
+                            }
+                        };
+                    }
+                    break;
+
+                default:
+                    throw new IllegalStateException("unexpected doc values type `"
+                        + type + "` for field `" + field + "`");
+            }
+        }
+    }
+
+    /**
+     * Implementation for {@link SortedDocValues} and {@link SortedSetDocValues}.
+     * Fails with an {@link IllegalStateException} if a document contains multiple values for the specified field.
+     */
+    static class Keyword extends CollapsingDocValuesSource<BytesRef> {
+        private Bits docsWithField;
+        private SortedDocValues values;
+
+        Keyword(String field) throws IOException {
+            super(field);
+        }
+
+        @Override
+        public BytesRef get(int doc) {
+            if (docsWithField.get(doc)) {
+                return values.get(doc);
+            } else {
+                return null;
+            }
+        }
+
+        @Override
+        public BytesRef copy(BytesRef value, BytesRef reuse) {
+            if (value == null) {
+                return null;
+            }
+            if (reuse != null) {
+                reuse.bytes = ArrayUtil.grow(reuse.bytes, value.length);
+                reuse.offset = 0;
+                reuse.length = value.length;
+                System.arraycopy(value.bytes, value.offset, reuse.bytes, 0, value.length);
+                return reuse;
+            } else {
+                return BytesRef.deepCopyOf(value);
+            }
+        }
+
+        @Override
+        public void setNextReader(LeafReader reader) throws IOException {
+            DocValuesType type = getDocValuesType(reader, field);
+            if (type == null || type == DocValuesType.NONE) {
+                values = DocValues.emptySorted();
+                docsWithField = new Bits.MatchNoBits(reader.maxDoc());
+                return;
+            }
+            docsWithField = DocValues.getDocsWithField(reader, field);
+            switch (type) {
+                case SORTED:
+                    values = DocValues.getSorted(reader, field);
+                    break;
+
+                case SORTED_SET:
+                    final SortedSetDocValues sorted = DocValues.getSortedSet(reader, field);
+                    values = DocValues.unwrapSingleton(sorted);
+                    if (values == null) {
+                        values = new SortedDocValues() {
+                            @Override
+                            public int getOrd(int docID) {
+                                sorted.setDocument(docID);
+                                int ord = (int) sorted.nextOrd();
+                                if (sorted.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) {
+                                    throw new IllegalStateException("failed to collapse " + docID +
+                                        ", the collapse field must be single valued");
+                                }
+                                return ord;
+                            }
+
+                            @Override
+                            public BytesRef lookupOrd(int ord) {
+                                return sorted.lookupOrd(ord);
+                            }
+
+                            @Override
+                            public int getValueCount() {
+                                return (int) sorted.getValueCount();
+                            }
+                        };
+                    }
+                    break;
+
+                default:
+                    throw new IllegalStateException("unexpected doc values type `"
+                        + type + "` for field `" + field + "`");
+            }
+        }
+    }
+
+    private static DocValuesType getDocValuesType(LeafReader in, String field) {
+        FieldInfo fi = in.getFieldInfos().fieldInfo(field);
+        if (fi != null) {
+            return fi.getDocValuesType();
+        }
+        return null;
+    }
+}
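Both implementations rely on the same unwrap-or-verify pattern: multi-valued doc values are unwrapped to their single-valued view when possible, and otherwise every lookup verifies the per-document value count. A standalone sketch of that pattern on the Lucene 6.x doc-values API (reader, field name and docId are assumed inputs):

---------------------------------------------------------------------------
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;

import java.io.IOException;

public class SingleValuedSketch {
    // Mirrors the SORTED_NUMERIC branch above.
    static long readSingleValue(LeafReader reader, String field, int docId) throws IOException {
        SortedNumericDocValues multi = DocValues.getSortedNumeric(reader, field);
        NumericDocValues single = DocValues.unwrapSingleton(multi);
        if (single != null) {
            return single.get(docId);       // fast path: single-valued in this segment
        }
        multi.setDocument(docId);
        if (multi.count() > 1) {            // slow path checks cardinality per document
            throw new IllegalStateException("the collapse field must be single valued");
        }
        return multi.valueAt(0);
    }
}
---------------------------------------------------------------------------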
diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java
new file mode 100644
index 00000000000..955a63e5483
--- /dev/null
+++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+
+import static org.apache.lucene.search.SortField.Type.SCORE;
+
+/**
+ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs}
+ * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key.
+ * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}.
+ */
+public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector<T> {
+    protected final String collapseField;
+
+    protected final Sort sort;
+    protected Scorer scorer;
+
+    private int totalHitCount;
+    private float maxScore;
+    private final boolean trackMaxScore;
+
+    private CollapsingTopDocsCollector(String collapseField, Sort sort,
+                                       int topN, boolean trackMaxScore) throws IOException {
+        super(sort, topN);
+        this.collapseField = collapseField;
+        this.trackMaxScore = trackMaxScore;
+        if (trackMaxScore) {
+            maxScore = Float.NEGATIVE_INFINITY;
+        } else {
+            maxScore = Float.NaN;
+        }
+        this.sort = sort;
+    }
+
+    /**
+     * Transforms {@link FirstPassGroupingCollector#getTopGroups(int, boolean)} output into
+     * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can create the final top docs at the end
+     * of the first pass.
+     */
+    public CollapseTopFieldDocs getTopDocs() {
+        Collection<SearchGroup<T>> groups = super.getTopGroups(0, true);
+        if (groups == null) {
+            return new CollapseTopFieldDocs(collapseField, totalHitCount, new ScoreDoc[0],
+                sort.getSort(), new Object[0], Float.NaN);
+        }
+        FieldDoc[] docs = new FieldDoc[groups.size()];
+        Object[] collapseValues = new Object[groups.size()];
+        int scorePos = -1;
+        for (int index = 0; index < sort.getSort().length; index++) {
+            SortField sortField = sort.getSort()[index];
+            if (sortField.getType() == SCORE) {
+                scorePos = index;
+                break;
+            }
+        }
+        int pos = 0;
+        Iterator<CollectedSearchGroup<T>> it = orderedGroups.iterator();
+        for (SearchGroup<T> group : groups) {
+            assert it.hasNext();
+            CollectedSearchGroup<T> col = it.next();
+            float score = Float.NaN;
+            if (scorePos != -1) {
+                score = (float) group.sortValues[scorePos];
+            }
+            docs[pos] = new FieldDoc(col.topDoc, score, group.sortValues);
+            collapseValues[pos] = group.groupValue;
+            pos++;
+        }
+        return new CollapseTopFieldDocs(collapseField, totalHitCount, docs, sort.getSort(),
+            collapseValues, maxScore);
+    }
+
+    @Override
+    public boolean needsScores() {
+        if (super.needsScores() == false) {
+            return trackMaxScore;
+        }
+        return true;
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+        super.setScorer(scorer);
+        this.scorer = scorer;
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+        super.collect(doc);
+        if (trackMaxScore) {
+            maxScore = Math.max(maxScore, scorer.score());
+        }
+        totalHitCount++;
+    }
+
+    private static class Numeric extends CollapsingTopDocsCollector<Long> {
+        private final CollapsingDocValuesSource.Numeric source;
+
+        private Numeric(String collapseField, Sort sort, int topN, boolean trackMaxScore) throws IOException {
+            super(collapseField, sort, topN, trackMaxScore);
+            source = new CollapsingDocValuesSource.Numeric(collapseField);
+        }
+
+        @Override
+        protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
+            super.doSetNextReader(readerContext);
+            source.setNextReader(readerContext.reader());
+        }
+
+        @Override
+        protected Long getDocGroupValue(int doc) {
+            return source.get(doc);
+        }
+
+        @Override
+        protected Long copyDocGroupValue(Long groupValue, Long reuse) {
+            return source.copy(groupValue, reuse);
+        }
+    }
+
+    private static class Keyword extends CollapsingTopDocsCollector<BytesRef> {
+        private final CollapsingDocValuesSource.Keyword source;
+
+        private Keyword(String collapseField, Sort sort, int topN, boolean trackMaxScore) throws IOException {
+            super(collapseField, sort, topN, trackMaxScore);
+            source = new CollapsingDocValuesSource.Keyword(collapseField);
+        }
+
+        @Override
+        protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
+            super.doSetNextReader(readerContext);
+            source.setNextReader(readerContext.reader());
+        }
+
+        @Override
+        protected BytesRef getDocGroupValue(int doc) {
+            return source.get(doc);
+        }
+
+        @Override
+        protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
+            return source.copy(groupValue, reuse);
+        }
+    }
+
+    /**
+     * Creates a collapsing top docs collector on a {@link org.apache.lucene.index.NumericDocValues} field.
+     * It also accepts {@link org.apache.lucene.index.SortedNumericDocValues} fields, but
+     * collection will fail with an {@link IllegalStateException} if a document contains more than one value for the
+     * field.
+     *
+     * @param collapseField The sort field used to group
+     *                      documents.
+     * @param sort          The {@link Sort} used to sort the collapsed hits.
+     *                      The collapsing keeps only the top sorted document per collapsed key.
+     *                      This must be non-null, i.e., if you want to groupSort by relevance
+     *                      use Sort.RELEVANCE.
+     * @param topN          How many top groups to keep.
+     * @throws IOException When I/O related errors occur
+     */
+    public static CollapsingTopDocsCollector<?> createNumeric(String collapseField, Sort sort,
+                                                              int topN, boolean trackMaxScore) throws IOException {
+        return new Numeric(collapseField, sort, topN, trackMaxScore);
+    }
+
+    /**
+     * Creates a collapsing top docs collector on a {@link org.apache.lucene.index.SortedDocValues} field.
+     * It also accepts {@link org.apache.lucene.index.SortedSetDocValues} fields, but
+     * collection will fail with an {@link IllegalStateException} if a document contains more than one value for the
+     * field.
+     *
+     * @param collapseField The sort field used to group
+     *                      documents.
+     * @param sort          The {@link Sort} used to sort the collapsed hits. The collapsing keeps only the top sorted
+     *                      document per collapsed key.
+     *                      This must be non-null, i.e., if you want to groupSort by relevance use Sort.RELEVANCE.
+     * @param topN          How many top groups to keep.
+     * @throws IOException When I/O related errors occur
+     */
+    public static CollapsingTopDocsCollector<?> createKeyword(String collapseField, Sort sort,
+                                                              int topN, boolean trackMaxScore) throws IOException {
+        return new Keyword(collapseField, sort, topN, trackMaxScore);
+    }
+}
+
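A minimal end-to-end sketch of driving the collector (the index contents are hypothetical; "user_id" must be indexed as single-valued numeric doc values, or collection throws the IllegalStateException described above):

---------------------------------------------------------------------------
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
import org.apache.lucene.search.grouping.CollapsingTopDocsCollector;
import org.apache.lucene.store.Directory;

import java.io.IOException;

public class CollapseSearchSketch {
    // Keeps only the best hit per distinct "user_id", sorted by relevance.
    static CollapseTopFieldDocs collapse(Directory dir) throws IOException {
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            CollapsingTopDocsCollector<?> collector =
                CollapsingTopDocsCollector.createNumeric("user_id", Sort.RELEVANCE, 10, true);
            searcher.search(new TermQuery(new Term("body", "elasticsearch")), collector);
            return collector.getTopDocs();
        }
    }
}
---------------------------------------------------------------------------

The returned collapse values line up positionally with the hits, i.e. top.collapseValues[0] is the collapse key of top.scoreDocs[0].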
diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java b/core/src/main/java/org/apache/lucene/search/highlight/Snippet.java
similarity index 96%
rename from core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java
rename to core/src/main/java/org/apache/lucene/search/highlight/Snippet.java
index f3bfa1b9c65..81a3d406ea3 100644
--- a/core/src/main/java/org/apache/lucene/search/postingshighlight/Snippet.java
+++ b/core/src/main/java/org/apache/lucene/search/highlight/Snippet.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.apache.lucene.search.postingshighlight;
+package org.apache.lucene.search.highlight;
 
 /**
  * Represents a scored highlighted snippet.
diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
index 889e7f741ed..a33bf16dee4 100644
--- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
+++ b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
@@ -19,6 +19,7 @@
 
 package org.apache.lucene.search.postingshighlight;
 
+import org.apache.lucene.search.highlight.Snippet;
 import org.apache.lucene.search.highlight.Encoder;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils;
 
@@ -46,10 +47,10 @@ public class CustomPassageFormatter extends PassageFormatter {
         for (int j = 0; j < passages.length; j++) {
             Passage passage = passages[j];
             StringBuilder sb = new StringBuilder();
-            pos = passage.startOffset;
-            for (int i = 0; i < passage.numMatches; i++) {
-                int start = passage.matchStarts[i];
-                int end = passage.matchEnds[i];
+            pos = passage.getStartOffset();
+            for (int i = 0; i < passage.getNumMatches(); i++) {
+                int start = passage.getMatchStarts()[i];
+                int end = passage.getMatchEnds()[i];
                 // its possible to have overlapping terms
                 if (start > pos) {
                     append(sb, content, pos, start);
@@ -62,7 +63,7 @@ public class CustomPassageFormatter extends PassageFormatter {
             }
         }
         // its possible a "term" from the analyzer could span a sentence boundary.
-        append(sb, content, pos, Math.max(pos, passage.endOffset));
+        append(sb, content, pos, Math.max(pos, passage.getEndOffset()));
         //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
         if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
             sb.deleteCharAt(sb.length() - 1);
@@ -70,7 +71,7 @@ public class CustomPassageFormatter extends PassageFormatter {
             sb.deleteCharAt(sb.length() - 1);
         }
         //and we trim the snippets too
-        snippets[j] = new Snippet(sb.toString().trim(), passage.score, passage.numMatches > 0);
+        snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0);
     }
     return snippets;
 }
diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java
index 30f57b2626c..ac90a3e57ae 100644
--- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java
+++ b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java
@@ -22,6 +22,7 @@ package org.apache.lucene.search.postingshighlight;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.highlight.Snippet;
 
 import java.io.IOException;
 import java.text.BreakIterator;
diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
index c65f962dbb8..5eaf63369b9 100644
--- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
+++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
@@ -392,7 +392,7 @@ public long ramBytesUsed() {
     final BytesRefBuilder spare = new BytesRefBuilder();
     private char sepLabel;
 
-    public EscapingTokenStreamToAutomaton(char sepLabel) {
+    EscapingTokenStreamToAutomaton(char sepLabel) {
       this.sepLabel = sepLabel;
     }
 
@@ -432,7 +432,7 @@ public long ramBytesUsed() {
 
     private final boolean hasPayloads;
 
-    public AnalyzingComparator(boolean hasPayloads) {
+    AnalyzingComparator(boolean hasPayloads) {
       this.hasPayloads = hasPayloads;
     }
 
@@ -1114,7 +1114,7 @@ public long ramBytesUsed() {
     BytesRef payload;
     long weight;
 
-    public SurfaceFormAndPayload(BytesRef payload, long cost) {
+    SurfaceFormAndPayload(BytesRef payload, long cost) {
       super();
      this.payload = payload;
      this.weight = cost;
diff --git a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java
new file mode 100644
index 00000000000..7a34a805db6
--- /dev/null
+++ b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.search.uhighlight;
+
+import org.apache.lucene.search.highlight.Encoder;
+import org.apache.lucene.search.highlight.Snippet;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils;
+
+/**
+ * Custom passage formatter that allows us to:
+ * 1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet})
+ * 2) use the {@link Encoder} implementations that are already used with the other highlighters
+ */
+public class CustomPassageFormatter extends PassageFormatter {
+
+    private final String preTag;
+    private final String postTag;
+    private final Encoder encoder;
+
+    public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) {
+        this.preTag = preTag;
+        this.postTag = postTag;
+        this.encoder = encoder;
+    }
+
+    @Override
+    public Snippet[] format(Passage[] passages, String content) {
+        Snippet[] snippets = new Snippet[passages.length];
+        int pos;
+        for (int j = 0; j < passages.length; j++) {
+            Passage passage = passages[j];
+            StringBuilder sb = new StringBuilder();
+            pos = passage.getStartOffset();
+            for (int i = 0; i < passage.getNumMatches(); i++) {
+                int start = passage.getMatchStarts()[i];
+                int end = passage.getMatchEnds()[i];
+                // it's possible to have overlapping terms
+                if (start > pos) {
+                    append(sb, content, pos, start);
+                }
+                if (end > pos) {
+                    sb.append(preTag);
+                    append(sb, content, Math.max(pos, start), end);
+                    sb.append(postTag);
+                    pos = end;
+                }
+            }
+            // it's possible a "term" from the analyzer could span a sentence boundary.
+            append(sb, content, pos, Math.max(pos, passage.getEndOffset()));
+            //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
+            if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
+                sb.deleteCharAt(sb.length() - 1);
+            } else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) {
+                sb.deleteCharAt(sb.length() - 1);
+            }
+            //and we trim the snippets too
+            snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0);
+        }
+        return snippets;
+    }
+
+    private void append(StringBuilder dest, String content, int start, int end) {
+        dest.append(encoder.encodeText(content.substring(start, end)));
+    }
+}
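A short usage sketch of the formatter above. The Passage array would normally be produced by the unified highlighter itself, so it is taken here as an assumed input; SimpleHTMLEncoder is one of the stock Lucene encoders:

---------------------------------------------------------------------------
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.Snippet;
import org.apache.lucene.search.uhighlight.CustomPassageFormatter;
import org.apache.lucene.search.uhighlight.Passage;

public class FormatterSketch {
    // Wraps each match in <em>...</em> and HTML-escapes the surrounding text.
    static Snippet[] format(Passage[] passages, String fieldContent) {
        CustomPassageFormatter formatter =
            new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
        return formatter.format(passages, fieldContent);
    }
}
---------------------------------------------------------------------------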
diff --git a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java
new file mode 100644
index 00000000000..4f1ec5fdb83
--- /dev/null
+++ b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.search.uhighlight;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.CommonTermsQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.highlight.Snippet;
+import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.all.AllTermQuery;
+import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
+import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+
+import java.io.IOException;
+import java.text.BreakIterator;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Subclass of the {@link UnifiedHighlighter} that works for a single field in a single document.
+ * Uses a custom {@link PassageFormatter}. Accepts the field content as a constructor
+ * argument, given that loading the field value can be done by reading from the _source field.
+ * Supports using a different {@link BreakIterator} to break the text into fragments. Considers every distinct field
+ * value as a discrete passage for highlighting (unless the whole content needs to be highlighted).
+ * Supports both returning empty snippets and non highlighted snippets when no highlighting can be performed.
+ */
+public class CustomUnifiedHighlighter extends UnifiedHighlighter {
+    private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
+
+    private final String fieldValue;
+    private final PassageFormatter passageFormatter;
+    private final BreakIterator breakIterator;
+    private final boolean returnNonHighlightedSnippets;
+
+    /**
+     * Creates a new instance of {@link CustomUnifiedHighlighter}
+     *
+     * @param analyzer the analyzer used for the field at index time, used for multi term queries internally
+     * @param passageFormatter our own {@link CustomPassageFormatter}
+     *                         which generates snippets in the form of {@link Snippet} objects
+     * @param breakIterator the {@link BreakIterator} to use for dividing text into passages.
+     *                      If null {@link BreakIterator#getSentenceInstance(Locale)} is used.
+     * @param fieldValue the original field values as constructor argument, loaded from the _source field or
+     *                   the relevant stored field.
+     * @param returnNonHighlightedSnippets whether non highlighted snippets should be
+     *                                     returned rather than empty snippets when no highlighting can be performed
+     */
+    public CustomUnifiedHighlighter(IndexSearcher searcher,
+                                    Analyzer analyzer,
+                                    PassageFormatter passageFormatter,
+                                    @Nullable BreakIterator breakIterator,
+                                    String fieldValue,
+                                    boolean returnNonHighlightedSnippets) {
+        super(searcher, analyzer);
+        this.breakIterator = breakIterator;
+        this.passageFormatter = passageFormatter;
+        this.fieldValue = fieldValue;
+        this.returnNonHighlightedSnippets = returnNonHighlightedSnippets;
+    }
+
+    /**
+     * Highlights terms extracted from the provided query within the content of the provided field name
+     */
+    public Snippet[] highlightField(String field, Query query, int docId, int maxPassages) throws IOException {
+        Map<String, Object[]> fieldsAsObjects = super.highlightFieldsAsObjects(new String[]{field}, query,
+            new int[]{docId}, new int[]{maxPassages});
+        Object[] snippetObjects = fieldsAsObjects.get(field);
+        if (snippetObjects != null) {
+            //one single document at a time
+            assert snippetObjects.length == 1;
+            Object snippetObject = snippetObjects[0];
+            if (snippetObject != null && snippetObject instanceof Snippet[]) {
+                return (Snippet[]) snippetObject;
+            }
+        }
+        return EMPTY_SNIPPET;
+    }
+
+    @Override
+    protected List<CharSequence[]> loadFieldValues(String[] fields, DocIdSetIterator docIter,
+                                                   int cacheCharsThreshold) throws IOException {
+        //we only highlight one field, one document at a time
+        return Collections.singletonList(new String[]{fieldValue});
+    }
+
+    @Override
+    protected BreakIterator getBreakIterator(String field) {
+        if (breakIterator != null) {
+            return breakIterator;
+        }
+        return super.getBreakIterator(field);
+    }
+
+    @Override
+    protected PassageFormatter getFormatter(String field) {
+        return passageFormatter;
+    }
+
+    @Override
+    protected int getMaxNoHighlightPassages(String field) {
+        if (returnNonHighlightedSnippets) {
+            return 1;
+        }
+        return 0;
+    }
+
+    @Override
+    protected Collection<Query> preMultiTermQueryRewrite(Query query) {
+        return rewriteCustomQuery(query);
+    }
+
+    @Override
+    protected Collection<Query> preSpanQueryRewrite(Query query) {
+        return rewriteCustomQuery(query);
+    }
+
+    /**
+     * Translates custom queries into queries that are supported by the unified highlighter.
+     */
+    private Collection<Query> rewriteCustomQuery(Query query) {
+        if (query instanceof MultiPhrasePrefixQuery) {
+            MultiPhrasePrefixQuery mpq = (MultiPhrasePrefixQuery) query;
+            Term[][] terms = mpq.getTerms();
+            int[] positions = mpq.getPositions();
+            SpanQuery[] positionSpanQueries = new SpanQuery[positions.length];
+            int sizeMinus1 = terms.length - 1;
+            for (int i = 0; i < positions.length; i++) {
+                SpanQuery[] innerQueries = new SpanQuery[terms[i].length];
+                for (int j = 0; j < terms[i].length; j++) {
+                    if (i == sizeMinus1) {
+                        innerQueries[j] = new SpanMultiTermQueryWrapper<>(new PrefixQuery(terms[i][j]));
+                    } else {
+                        innerQueries[j] = new SpanTermQuery(terms[i][j]);
+                    }
+                }
+                if (innerQueries.length > 1) {
+                    positionSpanQueries[i] = new SpanOrQuery(innerQueries);
+                } else {
+                    positionSpanQueries[i] = innerQueries[0];
+                }
+            }
+            // sum position increments beyond 1
+            int positionGaps = 0;
+            if (positions.length >= 2) {
+                // positions are in increasing order. max(0,...) is just a safeguard.
+                positionGaps = Math.max(0, positions[positions.length - 1] - positions[0] - positions.length + 1);
+            }
+
+            //if original slop is 0 then require inOrder
+            boolean inorder = (mpq.getSlop() == 0);
+            return Collections.singletonList(new SpanNearQuery(positionSpanQueries,
+                mpq.getSlop() + positionGaps, inorder));
+        } else if (query instanceof CommonTermsQuery) {
+            CommonTermsQuery ctq = (CommonTermsQuery) query;
+            List<Query> tqs = new ArrayList<>();
+            for (Term term : ctq.getTerms()) {
+                tqs.add(new TermQuery(term));
+            }
+            return tqs;
+        } else if (query instanceof AllTermQuery) {
+            AllTermQuery atq = (AllTermQuery) query;
+            return Collections.singletonList(new TermQuery(atq.getTerm()));
+        } else if (query instanceof FunctionScoreQuery) {
+            return Collections.singletonList(((FunctionScoreQuery) query).getSubQuery());
+        } else if (query instanceof FiltersFunctionScoreQuery) {
+            return Collections.singletonList(((FiltersFunctionScoreQuery) query).getSubQuery());
+        } else {
+            return null;
+        }
+    }
+}
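A minimal sketch of driving the highlighter above (searcher, analyzer, query and docId are assumed inputs; fieldValue is the raw "body" text as loaded from _source):

---------------------------------------------------------------------------
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.DefaultEncoder;
import org.apache.lucene.search.highlight.Snippet;
import org.apache.lucene.search.uhighlight.CustomPassageFormatter;
import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter;

import java.io.IOException;

public class HighlightSketch {
    static Snippet[] highlight(IndexSearcher searcher, Analyzer analyzer,
                               Query query, int docId, String fieldValue) throws IOException {
        CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(
            searcher, analyzer,
            new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder()),
            null,          // null -> sentence BreakIterator
            fieldValue,
            true);         // return the top passage even without matches
        return highlighter.highlightField("body", query, docId, 3);
    }
}
---------------------------------------------------------------------------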
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index 97711eed427..389892a8652 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -21,6 +21,8 @@ package org.elasticsearch;
 
 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -34,45 +36,49 @@ import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.transport.TcpTransport;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName;
-import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
 
 /**
  * A base class for all elasticsearch exceptions.
 */
 public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {
 
-    static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);
+    private static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);
 
     /**
-     * Passed in the {@link Params} of {@link #toXContent(XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, Throwable)}
+     * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)}
     * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is
     * internal only and not available as a URL parameter.
     */
-    public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
+    private static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
 
     /**
-     * Passed in the {@link Params} of {@link #toXContent(XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, Throwable)}
+     * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)}
     * to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is
     * internal only and not available as a URL parameter. Use the {@code error_trace} parameter instead.
     */
     public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
     public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
-    public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
-    private static final String INDEX_HEADER_KEY = "es.index";
-    private static final String INDEX_HEADER_KEY_UUID = "es.index_uuid";
-    private static final String SHARD_HEADER_KEY = "es.shard";
-    private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
-    private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";
+    private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
+    private static final String INDEX_METADATA_KEY = "es.index";
+    private static final String INDEX_METADATA_KEY_UUID = "es.index_uuid";
+    private static final String SHARD_METADATA_KEY = "es.shard";
+    private static final String RESOURCE_METADATA_TYPE_KEY = "es.resource.type";
+    private static final String RESOURCE_METADATA_ID_KEY = "es.resource.id";
 
     private static final String TYPE = "type";
     private static final String REASON = "reason";
@@ -82,8 +88,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     private static final String ERROR = "error";
     private static final String ROOT_CAUSE = "root_cause";
 
-    private static final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> ID_TO_SUPPLIER;
+    private static final Map<Integer, CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException>> ID_TO_SUPPLIER;
     private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;
+
+    private final Map<String, List<String>> metadata = new HashMap<>();
     private final Map<String, List<String>> headers = new HashMap<>();
 
     /**
@@ -125,14 +132,56 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         super(in.readOptionalString(), in.readException());
         readStackTrace(this, in);
         headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
+        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
+        } else {
+            for (Iterator<Map.Entry<String, List<String>>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) {
+                Map.Entry<String, List<String>> header = iterator.next();
+                if (header.getKey().startsWith("es.")) {
+                    metadata.put(header.getKey(), header.getValue());
+                    iterator.remove();
+                }
+            }
+        }
     }
 
     /**
-     * Adds a new header with the given key.
-     * This method will replace existing header if a header with the same key already exists
+     * Adds a new piece of metadata with the given key.
+     * If the provided key is already present, the corresponding metadata will be replaced
     */
-    public void addHeader(String key, String... value) {
-        this.headers.put(key, Arrays.asList(value));
+    public void addMetadata(String key, String... values) {
+        addMetadata(key, Arrays.asList(values));
+    }
+
+    /**
+     * Adds a new piece of metadata with the given key.
+     * If the provided key is already present, the corresponding metadata will be replaced
+     */
+    public void addMetadata(String key, List<String> values) {
+        //we need to enforce this otherwise bw comp doesn't work properly, as "es." was the previous criteria to split headers in two sets
+        if (key.startsWith("es.") == false) {
+            throw new IllegalArgumentException("exception metadata must start with [es.], found [" + key + "] instead");
+        }
+        this.metadata.put(key, values);
+    }
+
+    /**
+     * Returns a set of all metadata keys on this exception
+     */
+    public Set<String> getMetadataKeys() {
+        return metadata.keySet();
+    }
+
+    /**
+     * Returns the list of metadata values for the given key or {@code null} if no metadata for the
+     * given key exists.
+     */
+    public List<String> getMetadata(String key) {
+        return metadata.get(key);
+    }
+
+    protected Map<String, List<String>> getMetadata() {
+        return metadata;
+    }
 
     /**
@@ -140,9 +189,20 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * This method will replace existing header if a header with the same key already exists
     */
     public void addHeader(String key, List<String> value) {
+        //we need to enforce this otherwise bw comp doesn't work properly, as "es." was the previous criteria to split headers in two sets
+        if (key.startsWith("es.")) {
+            throw new IllegalArgumentException("exception headers must not start with [es.], found [" + key + "] instead");
+        }
         this.headers.put(key, value);
     }
 
+    /**
+     * Adds a new header with the given key.
+     * This method will replace existing header if a header with the same key already exists
+     */
+    public void addHeader(String key, String... value) {
+        addHeader(key, Arrays.asList(value));
+    }
 
     /**
     * Returns a set of all header keys on this exception
@@ -152,13 +212,17 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Returns the list of header values for the given key or {@code null} if not header for the
+     * Returns the list of header values for the given key or {@code null} if no header for the
     * given key exists.
     */
     public List<String> getHeader(String key) {
         return headers.get(key);
     }
 
+    protected Map<String, List<String>> getHeaders() {
+        return headers;
+    }
+
     /**
     * Returns the rest status code associated with this exception.
     */
@@ -219,11 +283,19 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         out.writeOptionalString(this.getMessage());
         out.writeException(this.getCause());
         writeStackTraces(this, out);
-        out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
+        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
+            out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);
+        } else {
+            HashMap<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());
+            finalHeaders.putAll(headers);
+            finalHeaders.putAll(metadata);
+            out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString);
+        }
     }
 
     public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
-        FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> elasticsearchException = ID_TO_SUPPLIER.get(id);
+        CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> elasticsearchException = ID_TO_SUPPLIER.get(id);
         if (elasticsearchException == null) {
             throw new IllegalStateException("unknown exception for id: " + id);
         }
@@ -256,64 +328,51 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         Throwable ex = ExceptionsHelper.unwrapCause(this);
         if (ex != this) {
-            toXContent(builder, params, this);
+            generateThrowableXContent(builder, params, this);
         } else {
-            builder.field(TYPE, getExceptionName());
-            builder.field(REASON, getMessage());
-            for (String key : headers.keySet()) {
-                if (key.startsWith("es.")) {
-                    List<String> values = headers.get(key);
-                    xContentHeader(builder, key.substring("es.".length()), values);
-                }
-            }
-            innerToXContent(builder, params);
-            renderHeader(builder, params);
-            if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) {
-                builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(this));
-            }
+            innerToXContent(builder, params, this, getExceptionName(), getMessage(), headers, metadata, getCause());
         }
         return builder;
     }
 
-    /**
-     * Renders additional per exception information into the xcontent
-     */
-    protected void innerToXContent(XContentBuilder builder, Params params) throws IOException {
-        causeToXContent(builder, params);
-    }
+    protected static void innerToXContent(XContentBuilder builder, Params params,
+                Throwable throwable, String type, String message, Map<String, List<String>> headers,
+                Map<String, List<String>> metadata, Throwable cause) throws IOException {
+        builder.field(TYPE, type);
+        builder.field(REASON, message);
 
-    /**
-     * Renders a cause exception as xcontent
-     */
-    protected void causeToXContent(XContentBuilder builder, Params params) throws IOException {
-        final Throwable cause = getCause();
-        if (cause != null && params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, REST_EXCEPTION_SKIP_CAUSE_DEFAULT) == false) {
-            builder.field(CAUSED_BY);
-            builder.startObject();
-            toXContent(builder, params, cause);
+        for (Map.Entry<String, List<String>> entry : metadata.entrySet()) {
+            headerToXContent(builder, entry.getKey().substring("es.".length()), entry.getValue());
+        }
+
+        if (throwable instanceof ElasticsearchException) {
+            ElasticsearchException exception = (ElasticsearchException) throwable;
+            exception.metadataToXContent(builder, params);
+        }
+
+        if (params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, REST_EXCEPTION_SKIP_CAUSE_DEFAULT) == false) {
+            if (cause != null) {
+                builder.field(CAUSED_BY);
+                builder.startObject();
+                generateThrowableXContent(builder, params, cause);
+                builder.endObject();
+            }
+        }
+
+        if (headers.isEmpty() == false) {
+            builder.startObject(HEADER);
+            for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
+                headerToXContent(builder, entry.getKey(), entry.getValue());
+            }
             builder.endObject();
         }
-    }
 
-    protected final void renderHeader(XContentBuilder builder, Params params) throws IOException {
-        boolean hasHeader = false;
-        for (String key : headers.keySet()) {
-            if (key.startsWith("es.")) {
-                continue;
-            }
-            if (hasHeader == false) {
-                builder.startObject(HEADER);
-                hasHeader = true;
-            }
-            List<String> values = headers.get(key);
-            xContentHeader(builder, key, values);
-        }
-        if (hasHeader) {
-            builder.endObject();
+        if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) {
+            builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(throwable));
         }
     }
 
-    private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException {
+    private static void headerToXContent(XContentBuilder builder, String key, List<String> values) throws IOException {
         if (values != null && values.isEmpty() == false) {
             if (values.size() == 1) {
                 builder.field(key, values.get(0));
@@ -328,25 +387,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Static toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent.
+     * Renders additional per exception information into the XContent
     */
-    public static void toXContent(XContentBuilder builder, Params params, Throwable ex) throws IOException {
-        ex = ExceptionsHelper.unwrapCause(ex);
-        if (ex instanceof ElasticsearchException) {
-            ((ElasticsearchException) ex).toXContent(builder, params);
-        } else {
-            builder.field(TYPE, getExceptionName(ex));
-            builder.field(REASON, ex.getMessage());
-            if (ex.getCause() != null) {
-                builder.field(CAUSED_BY);
-                builder.startObject();
-                toXContent(builder, params, ex.getCause());
-                builder.endObject();
-            }
-            if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) {
-                builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(ex));
-            }
-        }
+    protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
     }
 
     /**
@@ -357,15 +400,25 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * instances.
     */
     public static ElasticsearchException fromXContent(XContentParser parser) throws IOException {
-        XContentParser.Token token = ensureFieldName(parser.nextToken(), parser::getTokenLocation);
+        XContentParser.Token token = parser.nextToken();
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
+        return innerFromXContent(parser, false);
+    }
+
+    private static ElasticsearchException innerFromXContent(XContentParser parser, boolean parseRootCauses) throws IOException {
+        XContentParser.Token token = parser.currentToken();
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
 
         String type = null, reason = null, stack = null;
         ElasticsearchException cause = null;
-        Map<String, Object> headers = new HashMap<>();
+        Map<String, List<String>> metadata = new HashMap<>();
+        Map<String, List<String>> headers = new HashMap<>();
+        List<ElasticsearchException> rootCauses = new ArrayList<>();
 
-        do {
+        for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) {
             String currentFieldName = parser.currentName();
             token = parser.nextToken();
+
             if (token.isValue()) {
                 if (TYPE.equals(currentFieldName)) {
                     type = parser.text();
@@ -373,36 +426,173 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                     reason = parser.text();
                 } else if (STACK_TRACE.equals(currentFieldName)) {
                     stack = parser.text();
-                } else {
-                    // Everything else is considered as a header
-                    headers.put(currentFieldName, parser.text());
+                } else if (token == XContentParser.Token.VALUE_STRING) {
+                    metadata.put(currentFieldName, Collections.singletonList(parser.text()));
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if (CAUSED_BY.equals(currentFieldName)) {
                     cause = fromXContent(parser);
                 } else if (HEADER.equals(currentFieldName)) {
-                    headers.putAll(parser.map());
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                        if (token == XContentParser.Token.FIELD_NAME) {
+                            currentFieldName = parser.currentName();
+                        } else {
+                            List<String> values = headers.getOrDefault(currentFieldName, new ArrayList<>());
+                            if (token == XContentParser.Token.VALUE_STRING) {
+                                values.add(parser.text());
+                            } else if (token == XContentParser.Token.START_ARRAY) {
+                                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                                    if (token == XContentParser.Token.VALUE_STRING) {
+                                        values.add(parser.text());
+                                    } else {
+                                        parser.skipChildren();
+                                    }
+                                }
+                            } else if (token == XContentParser.Token.START_OBJECT) {
+                                parser.skipChildren();
+                            }
+                            headers.put(currentFieldName, values);
+                        }
+                    }
                 } else {
-                    throwUnknownField(currentFieldName, parser.getTokenLocation());
+                    // Any additional metadata object added by the metadataToXContent method is ignored
+                    // and skipped, so that the parser does not fail on unknown fields. The parser only
+                    // supports metadata key-pairs and metadata arrays of values.
+                    parser.skipChildren();
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                if (parseRootCauses && ROOT_CAUSE.equals(currentFieldName)) {
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        rootCauses.add(fromXContent(parser));
+                    }
+                } else {
+                    // Parse the array and add each item to the corresponding list of metadata.
+                    // Arrays of objects are not supported yet and just ignored and skipped.
+                    List<String> values = new ArrayList<>();
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        if (token == XContentParser.Token.VALUE_STRING) {
+                            values.add(parser.text());
+                        } else {
+                            parser.skipChildren();
+                        }
+                    }
+                    if (values.size() > 0) {
+                        if (metadata.containsKey(currentFieldName)) {
+                            values.addAll(metadata.get(currentFieldName));
+                        }
+                        metadata.put(currentFieldName, values);
+                    }
                 }
             }
-        } while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME);
-
-        StringBuilder message = new StringBuilder("Elasticsearch exception [");
-        message.append(TYPE).append('=').append(type).append(", ");
-        message.append(REASON).append('=').append(reason);
-        if (stack != null) {
-            message.append(", ").append(STACK_TRACE).append('=').append(stack);
         }
-        message.append(']');
 
-        ElasticsearchException e = new ElasticsearchException(message.toString(), cause);
-        for (Map.Entry<String, Object> header : headers.entrySet()) {
-            e.addHeader(header.getKey(), String.valueOf(header.getValue()));
+        ElasticsearchException e = new ElasticsearchException(buildMessage(type, reason, stack), cause);
+        for (Map.Entry<String, List<String>> entry : metadata.entrySet()) {
+            //subclasses can print out additional metadata through the metadataToXContent method. Simple key-value pairs will be
+            //parsed back and become part of this metadata set, while objects and arrays are not supported when parsing back.
+            //Those key-value pairs become part of the metadata set and inherit the "es." prefix as that is currently required
+            //by addMetadata. The prefix will get stripped out when printing metadata out so it will be effectively invisible.
+            //TODO move subclasses that print out simple metadata to using addMetadata directly and support also numbers and booleans.
+            //TODO rename metadataToXContent and have only SearchPhaseExecutionException use it, which prints out complex objects
+            e.addMetadata("es." + entry.getKey(), entry.getValue());
+        }
+        for (Map.Entry<String, List<String>> header : headers.entrySet()) {
+            e.addHeader(header.getKey(), header.getValue());
+        }
+
+        // Adds root causes as suppressed exception. This way they are not lost
+        // after parsing and can be retrieved using getSuppressed() method.
+        for (ElasticsearchException rootCause : rootCauses) {
+            e.addSuppressed(rootCause);
         }
         return e;
     }
 
+    /**
+     * Static toXContent helper method that renders {@link org.elasticsearch.ElasticsearchException} or {@link Throwable} instances
+     * as XContent, delegating the rendering to {@link #toXContent(XContentBuilder, Params)}
+     * or {@link #innerToXContent(XContentBuilder, Params, Throwable, String, String, Map, Map, Throwable)}.
+     *
+     * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can
+     * be parsed back using the {@link #fromXContent(XContentParser)} method.
+     */
+    public static void generateThrowableXContent(XContentBuilder builder, Params params, Throwable t) throws IOException {
+        t = ExceptionsHelper.unwrapCause(t);
+
+        if (t instanceof ElasticsearchException) {
+            ((ElasticsearchException) t).toXContent(builder, params);
+        } else {
+            innerToXContent(builder, params, t, getExceptionName(t), t.getMessage(), emptyMap(), emptyMap(), t.getCause());
+        }
+    }
+
+    /**
+     * Renders any exception as XContent, encapsulated within a field or object named "error". The level of details that are rendered
+     * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the
+     * exception is rendered. When it's true all details are provided including guessed root causes, cause and potentially stack
+     * trace.
+     *
+     * This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed
+     * by the {@link #failureFromXContent(XContentParser)} method.
+     */
+    public static void generateFailureXContent(XContentBuilder builder, Params params, @Nullable Exception e, boolean detailed)
+            throws IOException {
+        // No exception to render as an error
+        if (e == null) {
+            builder.field(ERROR, "unknown");
+            return;
+        }
+
+        // Render the exception with a simple message
+        if (detailed == false) {
+            String message = "No ElasticsearchException found";
+            Throwable t = e;
+            for (int counter = 0; counter < 10 && t != null; counter++) {
+                if (t instanceof ElasticsearchException) {
+                    message = t.getClass().getSimpleName() + "[" + t.getMessage() + "]";
+                    break;
+                }
+                t = t.getCause();
+            }
+            builder.field(ERROR, message);
+            return;
+        }
+
+        // Render the exception with all details
+        final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
+        builder.startObject(ERROR);
+        {
+            builder.startArray(ROOT_CAUSE);
+            for (ElasticsearchException rootCause : rootCauses) {
+                builder.startObject();
+                rootCause.toXContent(builder, new DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_CAUSE, "true"), params));
+                builder.endObject();
+            }
+            builder.endArray();
+        }
+        generateThrowableXContent(builder, params, e);
+        builder.endObject();
+    }
+
+    /**
+     * Parses the output of {@link #generateFailureXContent(XContentBuilder, Params, Exception, boolean)}
+     */
+    public static ElasticsearchException failureFromXContent(XContentParser parser) throws IOException {
+        XContentParser.Token token = parser.currentToken();
+        ensureFieldName(parser, token, ERROR);
+
+        token = parser.nextToken();
+        if (token.isValue()) {
+            return new ElasticsearchException(buildMessage("exception", parser.text(), null));
+        }
+
+        ensureExpectedToken(token, XContentParser.Token.START_OBJECT, parser::getTokenLocation);
+        token = parser.nextToken();
+
+        // Root causes are parsed in the innerFromXContent() and are added as suppressed exceptions.
+        return innerFromXContent(parser, true);
+    }
+
     /**
     * Returns the root cause of this exception or multiple if different shards caused different exceptions
     */
@@ -448,12 +638,23 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         return toUnderscoreCase(simpleName);
     }
 
+    static String buildMessage(String type, String reason, String stack) {
+        StringBuilder message = new StringBuilder("Elasticsearch exception [");
+        message.append(TYPE).append('=').append(type).append(", ");
+        message.append(REASON).append('=').append(reason);
+        if (stack != null) {
+            message.append(", ").append(STACK_TRACE).append('=').append(stack);
+        }
+        message.append(']');
+        return message.toString();
+    }
+
     @Override
     public String toString() {
         StringBuilder builder = new StringBuilder();
-        if (headers.containsKey(INDEX_HEADER_KEY)) {
+        if (metadata.containsKey(INDEX_METADATA_KEY)) {
             builder.append(getIndex());
-            if (headers.containsKey(SHARD_HEADER_KEY)) {
+            if (metadata.containsKey(SHARD_METADATA_KEY)) {
                 builder.append('[').append(getShardId()).append(']');
             }
             builder.append(' ');
@@ -752,8 +953,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
             org.elasticsearch.search.SearchContextException::new, 127, UNKNOWN_VERSION_ADDED),
         SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
             org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128, UNKNOWN_VERSION_ADDED),
-        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
-            org.elasticsearch.index.engine.EngineClosedException::new, 129, UNKNOWN_VERSION_ADDED),
+        // 129 was EngineClosedException
         NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
             org.elasticsearch.action.NoShardAvailableActionException::new, 130, UNKNOWN_VERSION_ADDED),
         UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
@@ -786,15 +986,17 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
             org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1_UNRELEASED),
         SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
-            org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2);
+            org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
+        UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
+            org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0_UNRELEASED);
 
         final Class<? extends ElasticsearchException> exceptionClass;
-        final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
+        final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
         final int id;
         final Version versionAdded;
 
         ElasticsearchExceptionHandle(Class<? extends ElasticsearchException> exceptionClass,
-                FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor, int id,
+                CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor, int id,
                 Version versionAdded) {
             // We need the exceptionClass because you can't dig it out of the constructor reliably.
             this.exceptionClass = exceptionClass;
@@ -812,9 +1014,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     public Index getIndex() {
-        List<String> index = getHeader(INDEX_HEADER_KEY);
+        List<String> index = getMetadata(INDEX_METADATA_KEY);
         if (index != null && index.isEmpty() == false) {
-            List<String> index_uuid = getHeader(INDEX_HEADER_KEY_UUID);
+            List<String> index_uuid = getMetadata(INDEX_METADATA_KEY_UUID);
             return new Index(index.get(0), index_uuid.get(0));
         }
 
@@ -822,7 +1024,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     public ShardId getShardId() {
-        List<String> shard = getHeader(SHARD_HEADER_KEY);
+        List<String> shard = getMetadata(SHARD_METADATA_KEY);
         if (shard != null && shard.isEmpty() == false) {
             return new ShardId(getIndex(), Integer.parseInt(shard.get(0)));
         }
@@ -831,8 +1033,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 
     public void setIndex(Index index) {
         if (index != null) {
-            addHeader(INDEX_HEADER_KEY, index.getName());
-            addHeader(INDEX_HEADER_KEY_UUID, index.getUUID());
+            addMetadata(INDEX_METADATA_KEY, index.getName());
+            addMetadata(INDEX_METADATA_KEY_UUID, index.getUUID());
         }
     }
 
@@ -845,27 +1047,22 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public void setShard(ShardId shardId) {
         if (shardId != null) {
             setIndex(shardId.getIndex());
-            addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));
+            addMetadata(SHARD_METADATA_KEY, Integer.toString(shardId.id()));
         }
     }
 
-    public void setShard(String index, int shardId) {
-        setIndex(index);
-        addHeader(SHARD_HEADER_KEY, Integer.toString(shardId));
-    }
-
     public void setResources(String type, String... id) {
         assert type != null;
-        addHeader(RESOURCE_HEADER_ID_KEY, id);
-        addHeader(RESOURCE_HEADER_TYPE_KEY, type);
+        addMetadata(RESOURCE_METADATA_ID_KEY, id);
+        addMetadata(RESOURCE_METADATA_TYPE_KEY, type);
     }
 
     public List<String> getResourceId() {
-        return getHeader(RESOURCE_HEADER_ID_KEY);
+        return getMetadata(RESOURCE_METADATA_ID_KEY);
     }
 
     public String getResourceType() {
-        List<String> header = getHeader(RESOURCE_HEADER_TYPE_KEY);
+        List<String> header = getMetadata(RESOURCE_METADATA_TYPE_KEY);
         if (header != null && header.isEmpty() == false) {
             assert header.size() == 1;
             return header.get(0);
@@ -873,26 +1070,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         return null;
     }
 
-    public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException {
-        builder.startObject(ERROR);
-        final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
-        builder.field(ROOT_CAUSE);
-        builder.startArray();
-        for (ElasticsearchException rootCause : rootCauses) {
-            builder.startObject();
-            rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(
-                    Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
-            builder.endObject();
-        }
-        builder.endArray();
-        ElasticsearchException.toXContent(builder, params, e);
-        builder.endObject();
-    }
-
-    interface FunctionThatThrowsIOException<T, R> {
-        R apply(T t) throws IOException;
-    }
-
     // lower cases and adds underscores to transitions in a name
     private static String toUnderscoreCase(String value) {
         StringBuilder sb = new StringBuilder();
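The new split between headers and metadata is easy to get backwards, so here is a short sketch of the resulting API contract (the keys and values are made up):

---------------------------------------------------------------------------
import org.elasticsearch.ElasticsearchException;

public class ExceptionMetadataSketch {
    public static void main(String[] args) {
        ElasticsearchException e = new ElasticsearchException("something went wrong");

        // metadata keys MUST start with "es." ...
        e.addMetadata("es.custom_info", "some value");

        // ... while header keys MUST NOT, otherwise an IllegalArgumentException is thrown.
        e.addHeader("X-Request-Id", "42");

        System.out.println(e.getMetadata("es.custom_info")); // [some value]
        System.out.println(e.getHeader("X-Request-Id"));     // [42]
    }
}
---------------------------------------------------------------------------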
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java b/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java
index cdccdb8da95..1711e9a3aaf 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java
@@ -24,6 +24,9 @@ import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 
+/**
+ * Unchecked exception that is translated into a {@code 400 BAD REQUEST} error when it bubbles out over HTTP.
+ */
 public class ElasticsearchParseException extends ElasticsearchException {
 
     public ElasticsearchParseException(String msg, Object... args) {
diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
index c30662a0934..e89e04a301d 100644
--- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -214,7 +214,7 @@ public final class ExceptionsHelper {
         final String index;
         final Class causeType;
 
-        public GroupBy(Throwable t) {
+        GroupBy(Throwable t) {
             if (t instanceof ElasticsearchException) {
                 final Index index = ((ElasticsearchException) t).getIndex();
                 if (index != null) {
diff --git a/core/src/main/java/org/elasticsearch/SpecialPermission.java b/core/src/main/java/org/elasticsearch/SpecialPermission.java
index 7d796346c64..9e5571a5b0a 100644
--- a/core/src/main/java/org/elasticsearch/SpecialPermission.java
+++ b/core/src/main/java/org/elasticsearch/SpecialPermission.java
@@ -57,6 +57,9 @@ import java.security.BasicPermission;
  *
  */
 public final class SpecialPermission extends BasicPermission {
+
+    public static final SpecialPermission INSTANCE = new SpecialPermission();
+
     /**
      * Creates a new SpecialPermission object.
      */
@@ -76,4 +79,14 @@ public final class SpecialPermission extends BasicPermission {
     public SpecialPermission(String name, String actions) {
         this();
     }
+
+    /**
+     * Check that the current stack has {@link SpecialPermission} access according to the {@link SecurityManager}.
+     */
+    public static void check() {
+        SecurityManager sm = System.getSecurityManager();
+        if (sm != null) {
+            sm.checkPermission(INSTANCE);
+        }
+    }
 }
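The INSTANCE constant and static check() added above collapse the security-manager boilerplate that callers need before entering a privileged block. A minimal sketch of the intended call pattern (the system-property read is only an illustration of "sensitive work", not taken from this change):

-------------------------------------------------
import java.security.AccessController;
import java.security.PrivilegedAction;

import org.elasticsearch.SpecialPermission;

public class PrivilegedCallDemo {
    public static String readProperty(String key) {
        // fails fast with a SecurityException if some caller
        // on the stack does not have SpecialPermission
        SpecialPermission.check();
        // only then perform the sensitive work inside doPrivileged
        return AccessController.doPrivileged(
                (PrivilegedAction<String>) () -> System.getProperty(key));
    }
}
-------------------------------------------------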
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index 474b220a9c5..ccd6f7c9a06 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -29,7 +29,7 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
 
 import java.io.IOException;
 
-public class Version {
+public class Version implements Comparable<Version> {
     /*
     * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator. AA
     * values below 25 are for alpha builds (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
@@ -73,6 +73,10 @@ public class Version {
     public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
     public static final int V_2_4_3_ID = 2040399;
     public static final Version V_2_4_3 = new Version(V_2_4_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
+    public static final int V_2_4_4_ID = 2040499;
+    public static final Version V_2_4_4 = new Version(V_2_4_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
+    public static final int V_2_4_5_ID = 2040599;
+    public static final Version V_2_4_5 = new Version(V_2_4_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -100,8 +104,14 @@ public class Version {
     public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
     public static final int V_5_1_2_ID_UNRELEASED = 5010299;
     public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+    public static final int V_5_1_3_ID_UNRELEASED = 5010399;
+    public static final Version V_5_1_3_UNRELEASED = new Version(V_5_1_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
     public static final int V_5_2_0_ID_UNRELEASED = 5020099;
-    public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+    public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
+    public static final int V_5_2_1_ID_UNRELEASED = 5020199;
+    public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
+    public static final int V_5_3_0_ID_UNRELEASED = 5030099;
+    public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
     public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
     public static final Version V_6_0_0_alpha1_UNRELEASED =
         new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
@@ -122,8 +132,14 @@ public class Version {
         switch (id) {
             case V_6_0_0_alpha1_ID_UNRELEASED:
                 return V_6_0_0_alpha1_UNRELEASED;
+            case V_5_3_0_ID_UNRELEASED:
+                return V_5_3_0_UNRELEASED;
+            case V_5_2_1_ID_UNRELEASED:
+                return V_5_2_1_UNRELEASED;
             case V_5_2_0_ID_UNRELEASED:
                 return V_5_2_0_UNRELEASED;
+            case V_5_1_3_ID_UNRELEASED:
+                return V_5_1_3_UNRELEASED;
             case V_5_1_2_ID_UNRELEASED:
                 return V_5_1_2_UNRELEASED;
             case V_5_1_1_ID_UNRELEASED:
@@ -150,6 +166,10 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_4_5_ID:
+                return V_2_4_5;
+            case V_2_4_4_ID:
+                return V_2_4_4;
             case V_2_4_3_ID:
                 return V_2_4_3;
             case V_2_4_2_ID:
@@ -310,6 +330,11 @@ public class Version {
         return version.id >= id;
     }
 
+    @Override
+    public int compareTo(Version other) {
+        return Integer.compare(this.id, other.id);
+    }
+
     /**
     * Returns the minimum compatible version based on the current
     * version. Ie a node needs to have at least the return version in order
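Because the XXYYZZAA scheme described in the comment above packs major, minor, revision and build into fixed decimal positions, ordering versions reduces to comparing two ints, which is exactly what the new compareTo does. A small worked decomposition using an ID that appears in this change (class and variable names are only for the illustration):

-------------------------------------------------
public class VersionIdDemo {
    public static void main(String[] args) {
        // V_5_2_0_ID_UNRELEASED from the constants above
        int id = 5020099;
        int major    = id / 1000000;        // 5
        int minor    = (id / 10000) % 100;  // 2
        int revision = (id / 100) % 100;    // 0
        int build    = id % 100;            // 99; per the comment: below 25
                                            // alpha, 25-50 beta, below 99 rc
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);

        // the new compareTo is Integer.compare on the ids, so ordering falls
        // out of the encoding: 5.2.0 sorts before 6.0.0-alpha1
        System.out.println(Integer.compare(5020099, 6000001)); // -1
    }
}
-------------------------------------------------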
diff --git a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java
index ef26867600e..f9fafa9f95a 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java
@@ -19,8 +19,11 @@
 
 package org.elasticsearch.action;
 
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.CheckedConsumer;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.function.Consumer;
 
 /**
@@ -65,4 +68,41 @@
             }
         };
     }
+
+    /**
+     * Notifies every given listener with the response passed to {@link #onResponse(Object)}. If a listener itself throws an exception,
+     * the exception is forwarded to {@link #onFailure(Exception)}. If in turn {@link #onFailure(Exception)} fails, all remaining
+     * listeners will be processed and the caught exception will be re-thrown.
+     */
+    static <Response> void onResponse(Iterable<ActionListener<Response>> listeners, Response response) {
+        List<Exception> exceptionList = new ArrayList<>();
+        for (ActionListener<Response> listener : listeners) {
+            try {
+                listener.onResponse(response);
+            } catch (Exception ex) {
+                try {
+                    listener.onFailure(ex);
+                } catch (Exception ex1) {
+                    exceptionList.add(ex1);
+                }
+            }
+        }
+        ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptionList);
+    }
+
+    /**
+     * Notifies every given listener with the failure passed to {@link #onFailure(Exception)}. If a listener itself throws an exception,
+     * all remaining listeners will be processed and the caught exception will be re-thrown.
+     */
+    static <Response> void onFailure(Iterable<ActionListener<Response>> listeners, Exception failure) {
+        List<Exception> exceptionList = new ArrayList<>();
+        for (ActionListener<Response> listener : listeners) {
+            try {
+                listener.onFailure(failure);
+            } catch (Exception ex) {
+                exceptionList.add(ex);
+            }
+        }
+        ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptionList);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index a24ed5f8083..4014c832b0f 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -19,14 +19,6 @@
 
 package org.elasticsearch.action;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.UnaryOperator;
-import java.util.stream.Collectors;
-
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
 import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
@@ -196,15 +188,19 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction
 import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
 import org.elasticsearch.action.update.TransportUpdateAction;
 import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.NamedRegistry;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.multibindings.MapBinder;
 import org.elasticsearch.common.inject.multibindings.Multibinder;
 import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
 import org.elasticsearch.rest.RestController;
@@ -312,6 +308,15 @@ import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.rest.action.search.RestSearchScrollAction;
 import org.elasticsearch.threadpool.ThreadPool;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+import java.util.function.UnaryOperator;
+import java.util.stream.Collectors;
+
 import static java.util.Collections.unmodifiableList;
 import static java.util.Collections.unmodifiableMap;
 
@@ -324,6 +329,10 @@ public class ActionModule extends AbstractModule {
     private final boolean transportClient;
     private final Settings settings;
+    private final IndexNameExpressionResolver indexNameExpressionResolver;
+    private final IndexScopedSettings indexScopedSettings;
+    private final ClusterSettings clusterSettings;
+    private final SettingsFilter settingsFilter;
     private final List actionPlugins;
     private final Map> actions;
     private final List> actionFilters;
@@ -331,14 +340,20 @@
     private final DestructiveOperations destructiveOperations;
     private
final RestController restController; - public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver resolver, - ClusterSettings clusterSettings, ThreadPool threadPool, List actionPlugins) { + public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, + IndexScopedSettings indexScopedSettings, ClusterSettings clusterSettings, SettingsFilter settingsFilter, + ThreadPool threadPool, List actionPlugins, NodeClient nodeClient, + CircuitBreakerService circuitBreakerService) { this.transportClient = transportClient; this.settings = settings; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.indexScopedSettings = indexScopedSettings; + this.clusterSettings = clusterSettings; + this.settingsFilter = settingsFilter; this.actionPlugins = actionPlugins; actions = setupActions(actionPlugins); actionFilters = setupActionFilters(actionPlugins); - autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver); + autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver); destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet()); UnaryOperator restWrapper = null; @@ -352,9 +367,14 @@ public class ActionModule extends AbstractModule { restWrapper = newRestWrapper; } } - restController = new RestController(settings, headers, restWrapper); + if (transportClient) { + restController = null; + } else { + restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService); + } } + public Map> getActions() { return actions; } @@ -362,7 +382,7 @@ public class ActionModule extends AbstractModule { static Map> setupActions(List actionPlugins) { // Subclass NamedRegistry for easy registration class ActionRegistry extends NamedRegistry> { - public ActionRegistry() { + ActionRegistry() { super("action"); } @@ -478,147 +498,145 @@ public class ActionModule extends AbstractModule { return unmodifiableList(actionPlugins.stream().flatMap(p -> p.getActionFilters().stream()).collect(Collectors.toList())); } - static Set> setupRestHandlers(List actionPlugins) { - Set> handlers = new HashSet<>(); - registerRestHandler(handlers, RestMainAction.class); - registerRestHandler(handlers, RestNodesInfoAction.class); - registerRestHandler(handlers, RestNodesStatsAction.class); - registerRestHandler(handlers, RestNodesHotThreadsAction.class); - registerRestHandler(handlers, RestClusterAllocationExplainAction.class); - registerRestHandler(handlers, RestClusterStatsAction.class); - registerRestHandler(handlers, RestClusterStateAction.class); - registerRestHandler(handlers, RestClusterHealthAction.class); - registerRestHandler(handlers, RestClusterUpdateSettingsAction.class); - registerRestHandler(handlers, RestClusterGetSettingsAction.class); - registerRestHandler(handlers, RestClusterRerouteAction.class); - registerRestHandler(handlers, RestClusterSearchShardsAction.class); - registerRestHandler(handlers, RestPendingClusterTasksAction.class); - registerRestHandler(handlers, RestPutRepositoryAction.class); - registerRestHandler(handlers, RestGetRepositoriesAction.class); - registerRestHandler(handlers, RestDeleteRepositoryAction.class); - registerRestHandler(handlers, RestVerifyRepositoryAction.class); - registerRestHandler(handlers, RestGetSnapshotsAction.class); - 
registerRestHandler(handlers, RestCreateSnapshotAction.class); - registerRestHandler(handlers, RestRestoreSnapshotAction.class); - registerRestHandler(handlers, RestDeleteSnapshotAction.class); - registerRestHandler(handlers, RestSnapshotsStatusAction.class); + public void initRestHandlers(Supplier nodesInCluster) { + List catActions = new ArrayList<>(); + Consumer registerHandler = a -> { + if (a instanceof AbstractCatAction) { + catActions.add((AbstractCatAction) a); + } + }; + registerHandler.accept(new RestMainAction(settings, restController)); + registerHandler.accept(new RestNodesInfoAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestNodesStatsAction(settings, restController)); + registerHandler.accept(new RestNodesHotThreadsAction(settings, restController)); + registerHandler.accept(new RestClusterAllocationExplainAction(settings, restController)); + registerHandler.accept(new RestClusterStatsAction(settings, restController)); + registerHandler.accept(new RestClusterStateAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestClusterHealthAction(settings, restController)); + registerHandler.accept(new RestClusterUpdateSettingsAction(settings, restController)); + registerHandler.accept(new RestClusterGetSettingsAction(settings, restController, clusterSettings, settingsFilter)); + registerHandler.accept(new RestClusterRerouteAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestClusterSearchShardsAction(settings, restController)); + registerHandler.accept(new RestPendingClusterTasksAction(settings, restController)); + registerHandler.accept(new RestPutRepositoryAction(settings, restController)); + registerHandler.accept(new RestGetRepositoriesAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestDeleteRepositoryAction(settings, restController)); + registerHandler.accept(new RestVerifyRepositoryAction(settings, restController)); + registerHandler.accept(new RestGetSnapshotsAction(settings, restController)); + registerHandler.accept(new RestCreateSnapshotAction(settings, restController)); + registerHandler.accept(new RestRestoreSnapshotAction(settings, restController)); + registerHandler.accept(new RestDeleteSnapshotAction(settings, restController)); + registerHandler.accept(new RestSnapshotsStatusAction(settings, restController)); - registerRestHandler(handlers, RestIndicesExistsAction.class); - registerRestHandler(handlers, RestTypesExistsAction.class); - registerRestHandler(handlers, RestGetIndicesAction.class); - registerRestHandler(handlers, RestIndicesStatsAction.class); - registerRestHandler(handlers, RestIndicesSegmentsAction.class); - registerRestHandler(handlers, RestIndicesShardStoresAction.class); - registerRestHandler(handlers, RestGetAliasesAction.class); - registerRestHandler(handlers, RestAliasesExistAction.class); - registerRestHandler(handlers, RestIndexDeleteAliasesAction.class); - registerRestHandler(handlers, RestIndexPutAliasAction.class); - registerRestHandler(handlers, RestIndicesAliasesAction.class); - registerRestHandler(handlers, RestCreateIndexAction.class); - registerRestHandler(handlers, RestShrinkIndexAction.class); - registerRestHandler(handlers, RestRolloverIndexAction.class); - registerRestHandler(handlers, RestDeleteIndexAction.class); - registerRestHandler(handlers, RestCloseIndexAction.class); - registerRestHandler(handlers, RestOpenIndexAction.class); + registerHandler.accept(new RestIndicesExistsAction(settings, restController)); 
+ registerHandler.accept(new RestTypesExistsAction(settings, restController)); + registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); + registerHandler.accept(new RestIndicesStatsAction(settings, restController)); + registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); + registerHandler.accept(new RestIndicesShardStoresAction(settings, restController)); + registerHandler.accept(new RestGetAliasesAction(settings, restController)); + registerHandler.accept(new RestAliasesExistAction(settings, restController)); + registerHandler.accept(new RestIndexDeleteAliasesAction(settings, restController)); + registerHandler.accept(new RestIndexPutAliasAction(settings, restController)); + registerHandler.accept(new RestIndicesAliasesAction(settings, restController)); + registerHandler.accept(new RestCreateIndexAction(settings, restController)); + registerHandler.accept(new RestShrinkIndexAction(settings, restController)); + registerHandler.accept(new RestRolloverIndexAction(settings, restController)); + registerHandler.accept(new RestDeleteIndexAction(settings, restController)); + registerHandler.accept(new RestCloseIndexAction(settings, restController)); + registerHandler.accept(new RestOpenIndexAction(settings, restController)); - registerRestHandler(handlers, RestUpdateSettingsAction.class); - registerRestHandler(handlers, RestGetSettingsAction.class); + registerHandler.accept(new RestUpdateSettingsAction(settings, restController)); + registerHandler.accept(new RestGetSettingsAction(settings, restController, indexScopedSettings, settingsFilter)); - registerRestHandler(handlers, RestAnalyzeAction.class); - registerRestHandler(handlers, RestGetIndexTemplateAction.class); - registerRestHandler(handlers, RestPutIndexTemplateAction.class); - registerRestHandler(handlers, RestDeleteIndexTemplateAction.class); - registerRestHandler(handlers, RestHeadIndexTemplateAction.class); + registerHandler.accept(new RestAnalyzeAction(settings, restController)); + registerHandler.accept(new RestGetIndexTemplateAction(settings, restController)); + registerHandler.accept(new RestPutIndexTemplateAction(settings, restController)); + registerHandler.accept(new RestDeleteIndexTemplateAction(settings, restController)); + registerHandler.accept(new RestHeadIndexTemplateAction(settings, restController)); - registerRestHandler(handlers, RestPutMappingAction.class); - registerRestHandler(handlers, RestGetMappingAction.class); - registerRestHandler(handlers, RestGetFieldMappingAction.class); + registerHandler.accept(new RestPutMappingAction(settings, restController)); + registerHandler.accept(new RestGetMappingAction(settings, restController)); + registerHandler.accept(new RestGetFieldMappingAction(settings, restController)); - registerRestHandler(handlers, RestRefreshAction.class); - registerRestHandler(handlers, RestFlushAction.class); - registerRestHandler(handlers, RestSyncedFlushAction.class); - registerRestHandler(handlers, RestForceMergeAction.class); - registerRestHandler(handlers, RestUpgradeAction.class); - registerRestHandler(handlers, RestClearIndicesCacheAction.class); + registerHandler.accept(new RestRefreshAction(settings, restController)); + registerHandler.accept(new RestFlushAction(settings, restController)); + registerHandler.accept(new RestSyncedFlushAction(settings, restController)); + registerHandler.accept(new RestForceMergeAction(settings, restController)); + registerHandler.accept(new RestUpgradeAction(settings, 
restController)); + registerHandler.accept(new RestClearIndicesCacheAction(settings, restController)); - registerRestHandler(handlers, RestIndexAction.class); - registerRestHandler(handlers, RestGetAction.class); - registerRestHandler(handlers, RestGetSourceAction.class); - registerRestHandler(handlers, RestHeadAction.Document.class); - registerRestHandler(handlers, RestHeadAction.Source.class); - registerRestHandler(handlers, RestMultiGetAction.class); - registerRestHandler(handlers, RestDeleteAction.class); - registerRestHandler(handlers, org.elasticsearch.rest.action.document.RestCountAction.class); - registerRestHandler(handlers, RestTermVectorsAction.class); - registerRestHandler(handlers, RestMultiTermVectorsAction.class); - registerRestHandler(handlers, RestBulkAction.class); - registerRestHandler(handlers, RestUpdateAction.class); + registerHandler.accept(new RestIndexAction(settings, restController)); + registerHandler.accept(new RestGetAction(settings, restController)); + registerHandler.accept(new RestGetSourceAction(settings, restController)); + registerHandler.accept(new RestHeadAction.Document(settings, restController)); + registerHandler.accept(new RestHeadAction.Source(settings, restController)); + registerHandler.accept(new RestMultiGetAction(settings, restController)); + registerHandler.accept(new RestDeleteAction(settings, restController)); + registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController)); + registerHandler.accept(new RestTermVectorsAction(settings, restController)); + registerHandler.accept(new RestMultiTermVectorsAction(settings, restController)); + registerHandler.accept(new RestBulkAction(settings, restController)); + registerHandler.accept(new RestUpdateAction(settings, restController)); - registerRestHandler(handlers, RestSearchAction.class); - registerRestHandler(handlers, RestSearchScrollAction.class); - registerRestHandler(handlers, RestClearScrollAction.class); - registerRestHandler(handlers, RestMultiSearchAction.class); + registerHandler.accept(new RestSearchAction(settings, restController)); + registerHandler.accept(new RestSearchScrollAction(settings, restController)); + registerHandler.accept(new RestClearScrollAction(settings, restController)); + registerHandler.accept(new RestMultiSearchAction(settings, restController)); - registerRestHandler(handlers, RestValidateQueryAction.class); + registerHandler.accept(new RestValidateQueryAction(settings, restController)); - registerRestHandler(handlers, RestExplainAction.class); + registerHandler.accept(new RestExplainAction(settings, restController)); - registerRestHandler(handlers, RestRecoveryAction.class); + registerHandler.accept(new RestRecoveryAction(settings, restController)); // Scripts API - registerRestHandler(handlers, RestGetStoredScriptAction.class); - registerRestHandler(handlers, RestPutStoredScriptAction.class); - registerRestHandler(handlers, RestDeleteStoredScriptAction.class); + registerHandler.accept(new RestGetStoredScriptAction(settings, restController)); + registerHandler.accept(new RestPutStoredScriptAction(settings, restController)); + registerHandler.accept(new RestDeleteStoredScriptAction(settings, restController)); - registerRestHandler(handlers, RestFieldStatsAction.class); + registerHandler.accept(new RestFieldStatsAction(settings, restController)); // Tasks API - registerRestHandler(handlers, RestListTasksAction.class); - registerRestHandler(handlers, RestGetTaskAction.class); - registerRestHandler(handlers, 
RestCancelTasksAction.class); + registerHandler.accept(new RestListTasksAction(settings, restController, nodesInCluster)); + registerHandler.accept(new RestGetTaskAction(settings, restController)); + registerHandler.accept(new RestCancelTasksAction(settings, restController, nodesInCluster)); // Ingest API - registerRestHandler(handlers, RestPutPipelineAction.class); - registerRestHandler(handlers, RestGetPipelineAction.class); - registerRestHandler(handlers, RestDeletePipelineAction.class); - registerRestHandler(handlers, RestSimulatePipelineAction.class); + registerHandler.accept(new RestPutPipelineAction(settings, restController)); + registerHandler.accept(new RestGetPipelineAction(settings, restController)); + registerHandler.accept(new RestDeletePipelineAction(settings, restController)); + registerHandler.accept(new RestSimulatePipelineAction(settings, restController)); // CAT API - registerRestHandler(handlers, RestCatAction.class); - registerRestHandler(handlers, RestAllocationAction.class); - registerRestHandler(handlers, RestShardsAction.class); - registerRestHandler(handlers, RestMasterAction.class); - registerRestHandler(handlers, RestNodesAction.class); - registerRestHandler(handlers, RestTasksAction.class); - registerRestHandler(handlers, RestIndicesAction.class); - registerRestHandler(handlers, RestSegmentsAction.class); + registerHandler.accept(new RestAllocationAction(settings, restController)); + registerHandler.accept(new RestShardsAction(settings, restController)); + registerHandler.accept(new RestMasterAction(settings, restController)); + registerHandler.accept(new RestNodesAction(settings, restController)); + registerHandler.accept(new RestTasksAction(settings, restController, nodesInCluster)); + registerHandler.accept(new RestIndicesAction(settings, restController, indexNameExpressionResolver)); + registerHandler.accept(new RestSegmentsAction(settings, restController)); // Fully qualified to prevent interference with rest.action.count.RestCountAction - registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestCountAction.class); + registerHandler.accept(new org.elasticsearch.rest.action.cat.RestCountAction(settings, restController)); // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestRecoveryAction.class); - registerRestHandler(handlers, RestHealthAction.class); - registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class); - registerRestHandler(handlers, RestAliasAction.class); - registerRestHandler(handlers, RestThreadPoolAction.class); - registerRestHandler(handlers, RestPluginsAction.class); - registerRestHandler(handlers, RestFielddataAction.class); - registerRestHandler(handlers, RestNodeAttrsAction.class); - registerRestHandler(handlers, RestRepositoriesAction.class); - registerRestHandler(handlers, RestSnapshotAction.class); - registerRestHandler(handlers, RestTemplatesAction.class); + registerHandler.accept(new org.elasticsearch.rest.action.cat.RestRecoveryAction(settings, restController)); + registerHandler.accept(new RestHealthAction(settings, restController)); + registerHandler.accept(new org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction(settings, restController)); + registerHandler.accept(new RestAliasAction(settings, restController)); + registerHandler.accept(new RestThreadPoolAction(settings, restController)); + registerHandler.accept(new RestPluginsAction(settings, restController)); + 
registerHandler.accept(new RestFielddataAction(settings, restController)); + registerHandler.accept(new RestNodeAttrsAction(settings, restController)); + registerHandler.accept(new RestRepositoriesAction(settings, restController)); + registerHandler.accept(new RestSnapshotAction(settings, restController)); + registerHandler.accept(new RestTemplatesAction(settings, restController)); for (ActionPlugin plugin : actionPlugins) { - for (Class handler : plugin.getRestHandlers()) { - registerRestHandler(handlers, handler); + for (RestHandler handler : plugin.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, + settingsFilter, indexNameExpressionResolver, nodesInCluster)) { + registerHandler.accept(handler); } } - return handlers; - } - - private static void registerRestHandler(Set> handlers, Class handler) { - if (handlers.contains(handler)) { - throw new IllegalArgumentException("can't register the same [rest_handler] more than once for [" + handler.getName() + "]"); - } - handlers.add(handler); + registerHandler.accept(new RestCatAction(settings, restController, catActions)); } @Override @@ -647,23 +665,6 @@ public class ActionModule extends AbstractModule { bind(supportAction).asEagerSingleton(); } } - - // Bind the RestController which is required (by Node) even if rest isn't enabled. - bind(RestController.class).toInstance(restController); - - // Setup the RestHandlers - if (NetworkModule.HTTP_ENABLED.get(settings)) { - Multibinder restHandlers = Multibinder.newSetBinder(binder(), RestHandler.class); - Multibinder catHandlers = Multibinder.newSetBinder(binder(), AbstractCatAction.class); - for (Class handler : setupRestHandlers(actionPlugins)) { - bind(handler).asEagerSingleton(); - if (AbstractCatAction.class.isAssignableFrom(handler)) { - catHandlers.addBinding().to(handler.asSubclass(AbstractCatAction.class)); - } else { - restHandlers.addBinding().to(handler); - } - } - } } } diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 7a12ab8ace2..920ef1b5438 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -18,15 +18,19 @@ */ package org.elasticsearch.action; +import org.elasticsearch.Version; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.StatusToXContent; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.SequenceNumbersService; @@ -34,12 +38,26 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Locale; +import static 
org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
 /**
  * A base class for the response of a write operation that involves a single doc
  */
-public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {
+public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject {
+
+    private static final String _SHARDS = "_shards";
+    private static final String _INDEX = "_index";
+    private static final String _TYPE = "_type";
+    private static final String _ID = "_id";
+    private static final String _VERSION = "_version";
+    private static final String _SEQ_NO = "_seq_no";
+    private static final String RESULT = "result";
+    private static final String FORCED_REFRESH = "forced_refresh";
 
     /**
      * An enum that represents the results of CRUD operations, primarily used to communicate the type of
@@ -185,8 +203,9 @@
     /**
      * Gets the location of the written document as a string suitable for a {@code Location} header.
      * @param routing any routing used in the request. If null the location doesn't include routing information.
+     *
      */
-    public String getLocation(@Nullable String routing) {
+    public String getLocation(@Nullable String routing) throws URISyntaxException {
         // Absolute path for the location of the document. This should be allowed as of HTTP/1.1:
         // https://tools.ietf.org/html/rfc7231#section-7.1.2
         String index = getIndex();
@@ -204,7 +223,9 @@
         if (routing != null) {
             location.append(routingStart).append(routing);
         }
-        return location.toString();
+
+        URI uri = new URI(location.toString());
+        return uri.toASCIIString();
     }
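getLocation now funnels the assembled /index/type/id path through java.net.URI, which is why it declares URISyntaxException; the payoff is that non-ASCII routing or id values come back percent-encoded and therefore safe for a Location header. A standalone sketch of that behaviour (the document coordinates are invented):

-------------------------------------------------
import java.net.URI;
import java.net.URISyntaxException;

public class LocationEncodingDemo {
    public static void main(String[] args) throws URISyntaxException {
        // the same shape the method above builds: /index/type/id?routing=...
        String location = "/twitter/tweet/1?routing=käse";
        // toASCIIString() percent-encodes the non-ASCII routing value,
        // printing /twitter/tweet/1?routing=k%C3%A4se
        System.out.println(new URI(location).toASCIIString());
    }
}
-------------------------------------------------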
builder.field("forced_refresh", forcedRefresh); + builder.field(FORCED_REFRESH, true); } - shardInfo.toXContent(builder, params); + builder.field(_SHARDS, shardInfo); if (getSeqNo() >= 0) { - builder.field("_seq_no", getSeqNo()); + builder.field(_SEQ_NO, getSeqNo()); } return builder; } + + /** + * Declare the {@link ObjectParser} fields to use when parsing a {@link DocWriteResponse} + */ + protected static void declareParserFields(ConstructingObjectParser objParser) { + objParser.declareString(constructorArg(), new ParseField(_INDEX)); + objParser.declareString(constructorArg(), new ParseField(_TYPE)); + objParser.declareString(constructorArg(), new ParseField(_ID)); + objParser.declareLong(constructorArg(), new ParseField(_VERSION)); + objParser.declareString(constructorArg(), new ParseField(RESULT)); + objParser.declareObject(optionalConstructorArg(), (p, c) -> ShardInfo.fromXContent(p), new ParseField(_SHARDS)); + objParser.declareLong(optionalConstructorArg(), new ParseField(_SEQ_NO)); + objParser.declareBoolean(DocWriteResponse::setForcedRefresh, new ParseField(FORCED_REFRESH)); + } } diff --git a/core/src/main/java/org/elasticsearch/action/ListenableActionFuture.java b/core/src/main/java/org/elasticsearch/action/ListenableActionFuture.java index 29b5a2a8774..87e4df3bc79 100644 --- a/core/src/main/java/org/elasticsearch/action/ListenableActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/ListenableActionFuture.java @@ -29,5 +29,5 @@ public interface ListenableActionFuture extends ActionFuture { /** * Add an action listener to be invoked when a response has received. */ - void addListener(final ActionListener listener); + void addListener(ActionListener listener); } diff --git a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java index 6704f610ec0..519adc77b84 100644 --- a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java +++ b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java @@ -105,7 +105,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent { if (reason != null) { builder.field("reason"); builder.startObject(); - ElasticsearchException.toXContent(builder, params, reason); + ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); } return builder; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index f31b1d37376..eab7853eefd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -20,12 +20,11 @@ package org.elasticsearch.action.admin.cluster.allocation; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.xcontent.ObjectParser; @@ -40,36 +39,47 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public class ClusterAllocationExplainRequest extends MasterNodeRequest { - private static ObjectParser PARSER = new ObjectParser( - "cluster/allocation/explain"); + private static ObjectParser PARSER = new ObjectParser<>("cluster/allocation/explain"); static { PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index")); PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard")); PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary")); + PARSER.declareString(ClusterAllocationExplainRequest::setCurrentNode, new ParseField("current_node")); } + @Nullable private String index; + @Nullable private Integer shard; + @Nullable private Boolean primary; + @Nullable + private String currentNode; private boolean includeYesDecisions = false; private boolean includeDiskInfo = false; - /** Explain the first unassigned shard */ + /** + * Create a new allocation explain request to explain any unassigned shard in the cluster. + */ public ClusterAllocationExplainRequest() { this.index = null; this.shard = null; this.primary = null; + this.currentNode = null; } /** * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica * will be picked for explanation. If no replicas are unassigned, the first assigned replica will * be explained. + * + * Package private for testing. */ - public ClusterAllocationExplainRequest(String index, int shard, boolean primary) { + ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) { this.index = index; this.shard = shard; this.primary = primary; + this.currentNode = currentNode; } @Override @@ -93,54 +103,103 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest nodeExplanations; + private final ShardRouting shardRouting; + private final DiscoveryNode currentNode; + private final DiscoveryNode relocationTargetNode; private final ClusterInfo clusterInfo; + private final ShardAllocationDecision shardAllocationDecision; - public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis, - long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch, - Map nodeExplanations, @Nullable ClusterInfo clusterInfo) { - this.shard = shard; - this.primary = primary; - this.hasPendingAsyncFetch = hasPendingAsyncFetch; - this.assignedNodeId = assignedNodeId; - this.unassignedInfo = unassignedInfo; - this.allocationDelayMillis = allocationDelayMillis; - this.remainingDelayMillis = remainingDelayMillis; - this.nodeExplanations = nodeExplanations; + public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable DiscoveryNode currentNode, + @Nullable DiscoveryNode relocationTargetNode, @Nullable ClusterInfo clusterInfo, + ShardAllocationDecision shardAllocationDecision) { + this.shardRouting = shardRouting; + this.currentNode = currentNode; + this.relocationTargetNode = relocationTargetNode; this.clusterInfo = clusterInfo; + this.shardAllocationDecision = shardAllocationDecision; } public ClusterAllocationExplanation(StreamInput in) throws IOException { - this.shard = ShardId.readShardId(in); - this.primary = in.readBoolean(); - this.hasPendingAsyncFetch = in.readBoolean(); - this.assignedNodeId = in.readOptionalString(); - this.unassignedInfo = 
in.readOptionalWriteable(UnassignedInfo::new); - this.allocationDelayMillis = in.readVLong(); - this.remainingDelayMillis = in.readVLong(); - - int mapSize = in.readVInt(); - Map nodeToExplanation = new HashMap<>(mapSize); - for (int i = 0; i < mapSize; i++) { - NodeExplanation nodeExplanation = new NodeExplanation(in); - nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation); - } - this.nodeExplanations = nodeToExplanation; - if (in.readBoolean()) { - this.clusterInfo = new ClusterInfo(in); - } else { - this.clusterInfo = null; - } + this.shardRouting = new ShardRouting(in); + this.currentNode = in.readOptionalWriteable(DiscoveryNode::new); + this.relocationTargetNode = in.readOptionalWriteable(DiscoveryNode::new); + this.clusterInfo = in.readOptionalWriteable(ClusterInfo::new); + this.shardAllocationDecision = new ShardAllocationDecision(in); } @Override public void writeTo(StreamOutput out) throws IOException { - this.getShard().writeTo(out); - out.writeBoolean(this.isPrimary()); - out.writeBoolean(this.isStillFetchingShardData()); - out.writeOptionalString(this.getAssignedNodeId()); - out.writeOptionalWriteable(this.getUnassignedInfo()); - out.writeVLong(allocationDelayMillis); - out.writeVLong(remainingDelayMillis); - - out.writeVInt(this.nodeExplanations.size()); - for (NodeExplanation explanation : this.nodeExplanations.values()) { - explanation.writeTo(out); - } - if (this.clusterInfo != null) { - out.writeBoolean(true); - this.clusterInfo.writeTo(out); - } else { - out.writeBoolean(false); - } + shardRouting.writeTo(out); + out.writeOptionalWriteable(currentNode); + out.writeOptionalWriteable(relocationTargetNode); + out.writeOptionalWriteable(clusterInfo); + shardAllocationDecision.writeTo(out); } - /** Return the shard that the explanation is about */ + /** + * Returns the shard that the explanation is about. + */ public ShardId getShard() { - return this.shard; + return shardRouting.shardId(); } - /** Return true if the explained shard is primary, false otherwise */ + /** + * Returns {@code true} if the explained shard is primary, {@code false} otherwise. + */ public boolean isPrimary() { - return this.primary; + return shardRouting.primary(); } - /** Return turn if shard data is still being fetched for the allocation */ - public boolean isStillFetchingShardData() { - return this.hasPendingAsyncFetch; + /** + * Returns the current {@link ShardRoutingState} of the shard. + */ + public ShardRoutingState getShardState() { + return shardRouting.state(); } - /** Return turn if the shard is assigned to a node */ - public boolean isAssigned() { - return this.assignedNodeId != null; - } - - /** Return the assigned node id or null if not assigned */ + /** + * Returns the currently assigned node, or {@code null} if the shard is unassigned. + */ @Nullable - public String getAssignedNodeId() { - return this.assignedNodeId; + public DiscoveryNode getCurrentNode() { + return currentNode; } - /** Return the unassigned info for the shard or null if the shard is assigned */ + /** + * Returns the relocating target node, or {@code null} if the shard is not in the {@link ShardRoutingState#RELOCATING} state. + */ + @Nullable + public DiscoveryNode getRelocationTargetNode() { + return relocationTargetNode; + } + + /** + * Returns the unassigned info for the shard, or {@code null} if the shard is active. 
+     */
     @Nullable
     public UnassignedInfo getUnassignedInfo() {
-        return this.unassignedInfo;
+        return shardRouting.unassignedInfo();
     }
 
-    /** Return the configured delay before the shard can be allocated in milliseconds */
-    public long getAllocationDelayMillis() {
-        return this.allocationDelayMillis;
-    }
-
-    /** Return the remaining allocation delay for this shard in milliseconds */
-    public long getRemainingDelayMillis() {
-        return this.remainingDelayMillis;
-    }
-
-    /** Return a map of node to the explanation for that node */
-    public Map getNodeExplanations() {
-        return this.nodeExplanations;
-    }
-
-    /** Return the cluster disk info for the cluster or null if none available */
+    /**
+     * Returns the cluster disk info for the cluster, or {@code null} if none available.
+     */
     @Nullable
     public ClusterInfo getClusterInfo() {
         return this.clusterInfo;
     }
 
+    /**
+     * Returns the shard allocation decision for attempting to assign or move the shard.
+     */
+    public ShardAllocationDecision getShardAllocationDecision() {
+        return shardAllocationDecision;
+    }
+
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(); {
-            builder.startObject("shard"); {
-                builder.field("index", shard.getIndexName());
-                builder.field("index_uuid", shard.getIndex().getUUID());
-                builder.field("id", shard.getId());
-                builder.field("primary", primary);
+            builder.field("index", shardRouting.getIndexName());
+            builder.field("shard", shardRouting.getId());
+            builder.field("primary", shardRouting.primary());
+            builder.field("current_state", shardRouting.state().toString().toLowerCase(Locale.ROOT));
+            if (shardRouting.unassignedInfo() != null) {
+                unassignedInfoToXContent(shardRouting.unassignedInfo(), builder);
             }
-            builder.endObject(); // end shard
-            builder.field("assigned", this.assignedNodeId != null);
-            // If assigned, show the node id of the node it's assigned to
-            if (assignedNodeId != null) {
-                builder.field("assigned_node_id", this.assignedNodeId);
-            }
-            builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
-            // If we have unassigned info, show that
-            if (unassignedInfo != null) {
-                unassignedInfo.toXContent(builder, params);
-                builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
-                builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
-            }
-            builder.startObject("nodes"); {
-                for (NodeExplanation explanation : nodeExplanations.values()) {
-                    explanation.toXContent(builder, params);
+            if (currentNode != null) {
+                builder.startObject("current_node");
+                {
+                    discoveryNodeToXContent(currentNode, true, builder);
+                    if (shardAllocationDecision.getMoveDecision().isDecisionTaken()
+                            && shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) {
+                        builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking());
+                    }
                 }
+                builder.endObject();
             }
-            builder.endObject(); // end nodes
             if (this.clusterInfo != null) {
                 builder.startObject("cluster_info"); {
                     this.clusterInfo.toXContent(builder, params);
                 }
                 builder.endObject(); // end "cluster_info"
             }
+            if (shardAllocationDecision.isDecisionTaken()) {
+                shardAllocationDecision.toXContent(builder, params);
+            } else {
+                String explanation;
+                if (shardRouting.state() == ShardRoutingState.RELOCATING) {
+                    explanation = "the shard is in the process of relocating from node [" + currentNode.getName() + "] " +
+                        "to node [" +
relocationTargetNode.getName() + "], wait until relocation has completed"; + } else { + assert shardRouting.state() == ShardRoutingState.INITIALIZING; + explanation = "the shard is in the process of initializing on node [" + currentNode.getName() + "], " + + "wait until initialization has completed"; + } + builder.field("explanation", explanation); + } } builder.endObject(); // end wrapping object return builder; } - /** An Enum representing the final decision for a shard allocation on a node */ - public enum FinalDecision { - // Yes, the shard can be assigned - YES((byte) 0), - // No, the shard cannot be assigned - NO((byte) 1), - // The shard is already assigned to this node - ALREADY_ASSIGNED((byte) 2); + private XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) + throws IOException { - private final byte id; - - FinalDecision (byte id) { - this.id = id; + builder.startObject("unassigned_info"); + builder.field("reason", unassignedInfo.getReason()); + builder.field("at", UnassignedInfo.DATE_TIME_FORMATTER.printer().print(unassignedInfo.getUnassignedTimeInMillis())); + if (unassignedInfo.getNumFailedAllocations() > 0) { + builder.field("failed_allocation_attempts", unassignedInfo.getNumFailedAllocations()); } - - private static FinalDecision fromId(byte id) { - switch (id) { - case 0: return YES; - case 1: return NO; - case 2: return ALREADY_ASSIGNED; - default: - throw new IllegalArgumentException("unknown id for final decision: [" + id + "]"); - } - } - - @Override - public String toString() { - switch (id) { - case 0: return "YES"; - case 1: return "NO"; - case 2: return "ALREADY_ASSIGNED"; - default: - throw new IllegalArgumentException("unknown id for final decision: [" + id + "]"); - } - } - - static FinalDecision readFrom(StreamInput in) throws IOException { - return fromId(in.readByte()); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); - } - } - - /** An Enum representing the state of the shard store's copy of the data on a node */ - public enum StoreCopy { - // No data for this shard is on the node - NONE((byte) 0), - // A copy of the data is available on this node - AVAILABLE((byte) 1), - // The copy of the data on the node is corrupt - CORRUPT((byte) 2), - // There was an error reading this node's copy of the data - IO_ERROR((byte) 3), - // The copy of the data on the node is stale - STALE((byte) 4), - // It's unknown what the copy of the data is - UNKNOWN((byte) 5); - - private final byte id; - - StoreCopy (byte id) { - this.id = id; - } - - private static StoreCopy fromId(byte id) { - switch (id) { - case 0: return NONE; - case 1: return AVAILABLE; - case 2: return CORRUPT; - case 3: return IO_ERROR; - case 4: return STALE; - case 5: return UNKNOWN; - default: - throw new IllegalArgumentException("unknown id for store copy: [" + id + "]"); - } - } - - @Override - public String toString() { - switch (id) { - case 0: return "NONE"; - case 1: return "AVAILABLE"; - case 2: return "CORRUPT"; - case 3: return "IO_ERROR"; - case 4: return "STALE"; - case 5: return "UNKNOWN"; - default: - throw new IllegalArgumentException("unknown id for store copy: [" + id + "]"); - } - } - - static StoreCopy readFrom(StreamInput in) throws IOException { - return fromId(in.readByte()); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); + String details = unassignedInfo.getDetails(); + if (details != null) { + builder.field("details", details); } + builder.field("last_allocation_status", 
AllocationDecision.fromAllocationStatus(unassignedInfo.getLastAllocationStatus())); + builder.endObject(); + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java deleted file mode 100644 index 9fdf97b320c..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Map; -/** The cluster allocation explanation for a single node */ -public class NodeExplanation implements Writeable, ToXContent { - private final DiscoveryNode node; - private final Decision nodeDecision; - private final Float nodeWeight; - private final IndicesShardStoresResponse.StoreStatus storeStatus; - private final ClusterAllocationExplanation.FinalDecision finalDecision; - private final ClusterAllocationExplanation.StoreCopy storeCopy; - private final String finalExplanation; - - public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight, - @Nullable final IndicesShardStoresResponse.StoreStatus storeStatus, - final ClusterAllocationExplanation.FinalDecision finalDecision, - final String finalExplanation, - final ClusterAllocationExplanation.StoreCopy storeCopy) { - this.node = node; - this.nodeDecision = nodeDecision; - this.nodeWeight = nodeWeight; - this.storeStatus = storeStatus; - this.finalDecision = finalDecision; - this.finalExplanation = finalExplanation; - this.storeCopy = storeCopy; - } - - public NodeExplanation(StreamInput in) throws IOException { - this.node = new DiscoveryNode(in); - this.nodeDecision = Decision.readFrom(in); - this.nodeWeight = in.readFloat(); - if (in.readBoolean()) { - this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in); - } else { - this.storeStatus = null; - } - this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in); - this.finalExplanation = in.readString(); - this.storeCopy = 
ClusterAllocationExplanation.StoreCopy.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - node.writeTo(out); - nodeDecision.writeTo(out); - out.writeFloat(nodeWeight); - if (storeStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - storeStatus.writeTo(out); - } - finalDecision.writeTo(out); - out.writeString(finalExplanation); - storeCopy.writeTo(out); - } - - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(node.getId()); { - builder.field("node_name", node.getName()); - builder.startObject("node_attributes"); { - for (Map.Entry attrEntry : node.getAttributes().entrySet()) { - builder.field(attrEntry.getKey(), attrEntry.getValue()); - } - } - builder.endObject(); // end attributes - builder.startObject("store"); { - builder.field("shard_copy", storeCopy.toString()); - if (storeStatus != null) { - final Throwable storeErr = storeStatus.getStoreException(); - if (storeErr != null) { - builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr)); - } - } - } - builder.endObject(); // end store - builder.field("final_decision", finalDecision.toString()); - builder.field("final_explanation", finalExplanation); - builder.field("weight", nodeWeight); - builder.startArray("decisions"); - nodeDecision.toXContent(builder, params); - builder.endArray(); - } - builder.endObject(); // end node - return builder; - } - - public DiscoveryNode getNode() { - return this.node; - } - - public Decision getDecision() { - return this.nodeDecision; - } - - public Float getWeight() { - return this.nodeWeight; - } - - @Nullable - public IndicesShardStoresResponse.StoreStatus getStoreStatus() { - return this.storeStatus; - } - - public ClusterAllocationExplanation.FinalDecision getFinalDecision() { - return this.finalDecision; - } - - public String getFinalExplanation() { - return this.finalExplanation; - } - - public ClusterAllocationExplanation.StoreCopy getStoreCopy() { - return this.storeCopy; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 5aa35a059fb..4d4796aaf3a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -19,13 +19,7 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.apache.lucene.index.CorruptIndexException; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterInfo; @@ -33,34 +27,25 @@ import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RecoverySource; -import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.MoveDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the @@ -72,7 +57,6 @@ public class TransportClusterAllocationExplainAction private final ClusterInfoService clusterInfoService; private final AllocationDeciders allocationDeciders; private final ShardsAllocator shardAllocator; - private final TransportIndicesShardStoresAction shardStoresAction; private final GatewayAllocator gatewayAllocator; @Inject @@ -80,14 +64,12 @@ public class TransportClusterAllocationExplainAction ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders, - ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction, - GatewayAllocator gatewayAllocator) { + ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) { super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterAllocationExplainRequest::new); this.clusterInfoService = clusterInfoService; this.allocationDeciders = allocationDeciders; this.shardAllocator = shardAllocator; - this.shardStoresAction = shardStoresAction; this.gatewayAllocator = gatewayAllocator; } @@ -106,172 +88,6 @@ public class TransportClusterAllocationExplainAction return new ClusterAllocationExplainResponse(); } - /** - * Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true, - * only non-YES (NO and THROTTLE) decisions are returned. 
- */ - public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) { - Decision d = allocation.deciders().canAllocate(shard, node, allocation); - if (includeYesDecisions) { - return d; - } else { - Decision.Multi nonYesDecisions = new Decision.Multi(); - List decisions = d.getDecisions(); - for (Decision decision : decisions) { - if (decision.type() != Decision.Type.YES) { - nonYesDecisions.add(decision); - } - } - return nonYesDecisions; - } - } - - /** - * Construct a {@code WeightedDecision} object for the given shard given all the metadata. This also attempts to construct the human - * readable FinalDecision and final explanation as part of the explanation. - */ - public static NodeExplanation calculateNodeExplanation(ShardRouting shard, - IndexMetaData indexMetaData, - DiscoveryNode node, - Decision nodeDecision, - Float nodeWeight, - IndicesShardStoresResponse.StoreStatus storeStatus, - String assignedNodeId, - Set activeAllocationIds, - boolean hasPendingAsyncFetch) { - final ClusterAllocationExplanation.FinalDecision finalDecision; - final ClusterAllocationExplanation.StoreCopy storeCopy; - final String finalExplanation; - - if (storeStatus == null) { - // No copies of the data - storeCopy = ClusterAllocationExplanation.StoreCopy.NONE; - } else { - final Exception storeErr = storeStatus.getStoreException(); - if (storeErr != null) { - if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) { - storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT; - } else { - storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR; - } - } else if (activeAllocationIds.isEmpty()) { - // The ids are only empty if dealing with a legacy index - // TODO: fetch the shard state versions and display here? 
- storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN; - } else if (activeAllocationIds.contains(storeStatus.getAllocationId())) { - storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE; - } else { - // Otherwise, this is a stale copy of the data (allocation ids don't match) - storeCopy = ClusterAllocationExplanation.StoreCopy.STALE; - } - } - - if (node.getId().equals(assignedNodeId)) { - finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED; - finalExplanation = "the shard is already assigned to this node"; - } else if (shard.unassigned() && shard.primary() == false && - shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) { - finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() + - " decision"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.unassigned() && shard.primary() == false && - shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) { - finalExplanation = "the shard's state is still being fetched so it cannot be allocated"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && - (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE || - shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) - && hasPendingAsyncFetch) { - finalExplanation = "the shard's state is still being fetched so it cannot be allocated"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) { - finalExplanation = "the copy of the shard is stale, allocation ids do not match"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) { - finalExplanation = "there is no copy of the shard available"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) { - finalExplanation = "the copy of the shard is corrupt"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) { - finalExplanation = "the copy of the shard cannot be read"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else { - if (nodeDecision.type() == Decision.Type.NO) { - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision"; - } else { - // TODO: handle throttling decision better here - finalDecision = ClusterAllocationExplanation.FinalDecision.YES; - if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) { - finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data"; - } else { - finalExplanation = "the shard can be assigned"; - } - } - } 
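// Taken together, the removed chain above resolves the legacy FinalDecision in
// precedence order: a shard already on this node -> ALREADY_ASSIGNED; an
// unassigned replica with a non-YES decider verdict or with shard state still
// being fetched -> NO; an unassigned primary recovering from an existing store
// (or snapshot) with a pending state fetch, or whose copy here is STALE, NONE,
// CORRUPT or IO_ERROR -> NO; otherwise the decider verdict decides (NO stays
// NO, anything else becomes YES).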
- return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy); - } - - - /** - * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code - * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions. - */ - public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes, - boolean includeYesDecisions, ShardsAllocator shardAllocator, - List shardStores, - GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) { - // don't short circuit deciders, we want a full explanation - allocation.debugDecision(true); - // get the existing unassigned info if available - UnassignedInfo ui = shard.unassignedInfo(); - - Map nodeToDecision = new HashMap<>(); - for (RoutingNode node : routingNodes) { - DiscoveryNode discoNode = node.node(); - if (discoNode.isDataNode()) { - Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions); - nodeToDecision.put(discoNode, d); - } - } - long remainingDelayMillis = 0; - final MetaData metadata = allocation.metaData(); - final IndexMetaData indexMetaData = metadata.index(shard.index()); - long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis(); - if (ui != null && ui.isDelayed()) { - long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings()); - remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis(); - } - - // Calculate weights for each of the nodes - Map weights = shardAllocator.weighShard(allocation, shard); - - Map nodeToStatus = new HashMap<>(shardStores.size()); - for (IndicesShardStoresResponse.StoreStatus status : shardStores) { - nodeToStatus.put(status.getNode(), status); - } - - Map explanations = new HashMap<>(shardStores.size()); - for (Map.Entry entry : nodeToDecision.entrySet()) { - DiscoveryNode node = entry.getKey(); - Decision decision = entry.getValue(); - Float weight = weights.get(node); - IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node); - NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight, - storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()), - allocation.hasPendingAsyncFetch()); - explanations.put(node, nodeExplanation); - } - return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), - shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui, - gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo); - } - @Override protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener listener) { @@ -280,66 +96,105 @@ public class TransportClusterAllocationExplainAction final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state, clusterInfo, System.nanoTime(), false); + ShardRouting shardRouting = findShardToExplain(request, allocation); + logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); + + ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, + request.includeDiskInfo() ? 
clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator); + listener.onResponse(new ClusterAllocationExplainResponse(cae)); + } + + // public for testing + public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation, + ClusterInfo clusterInfo, boolean includeYesDecisions, + GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) { + allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS); + + ShardAllocationDecision shardDecision; + if (shardRouting.initializing() || shardRouting.relocating()) { + shardDecision = ShardAllocationDecision.NOT_TAKEN; + } else { + AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ? + gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN; + if (allocateDecision.isDecisionTaken() == false) { + shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation); + } else { + shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN); + } + } + + return new ClusterAllocationExplanation(shardRouting, + shardRouting.currentNodeId() != null ? allocation.nodes().get(shardRouting.currentNodeId()) : null, + shardRouting.relocatingNodeId() != null ? allocation.nodes().get(shardRouting.relocatingNodeId()) : null, + clusterInfo, shardDecision); + } + + // public for testing + public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest request, RoutingAllocation allocation) { ShardRouting foundShard = null; if (request.useAnyUnassignedShard()) { // If we can use any shard, just pick the first unassigned one (if there are any) - RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator(); + RoutingNodes.UnassignedShards.UnassignedIterator ui = allocation.routingNodes().unassigned().iterator(); if (ui.hasNext()) { foundShard = ui.next(); } + if (foundShard == null) { + throw new IllegalStateException("unable to find any unassigned shards to explain [" + request + "]"); + } } else { String index = request.getIndex(); int shard = request.getShard(); if (request.isPrimary()) { // If we're looking for the primary shard, there's only one copy, so pick it directly foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard(); + if (request.getCurrentNode() != null) { + DiscoveryNode primaryNode = allocation.nodes().resolveNode(request.getCurrentNode()); + // the primary is assigned to a node other than the node specified in the request + if (primaryNode.getId().equals(foundShard.currentNodeId()) == false) { + throw new IllegalStateException("unable to find primary shard assigned to node [" + request.getCurrentNode() + "]"); + } + } } else { // If looking for a replica, go through all the replica shards List replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards(); - if (replicaShardRoutings.size() > 0) { - // Pick the first replica at the very least - foundShard = replicaShardRoutings.get(0); - // In case there are multiple replicas where some are assigned and some aren't, - // try to find one that is unassigned at least + if (request.getCurrentNode() != null) { + // the request is to explain a replica shard already assigned on a particular node, + // so find that shard copy + DiscoveryNode replicaNode = allocation.nodes().resolveNode(request.getCurrentNode()); for (ShardRouting replica : replicaShardRoutings) { - if 
(replica.unassigned()) { + if (replicaNode.getId().equals(replica.currentNodeId())) { foundShard = replica; break; } } + if (foundShard == null) { + throw new IllegalStateException("unable to find a replica shard assigned to node [" + + request.getCurrentNode() + "]"); + } + } else { + if (replicaShardRoutings.size() > 0) { + // Pick the first replica at the very least + foundShard = replicaShardRoutings.get(0); + for (ShardRouting replica : replicaShardRoutings) { + // In case there are multiple replicas where some are assigned and some aren't, + // try to find one that is unassigned at least + if (replica.unassigned()) { + foundShard = replica; + break; + } else if (replica.started() && (foundShard.initializing() || foundShard.relocating())) { + // prefer started shards to initializing or relocating shards because started shards + // can be explained + foundShard = replica; + } + } + } } } } if (foundShard == null) { - listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request)); - return; + throw new IllegalStateException("unable to find any shards to explain [" + request + "] in the routing table"); } - final ShardRouting shardRouting = foundShard; - logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); - - getShardStores(shardRouting, new ActionListener() { - @Override - public void onResponse(IndicesShardStoresResponse shardStoreResponse) { - ImmutableOpenIntMap> shardStatuses = - shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName()); - List shardStoreStatus = shardStatuses.get(shardRouting.id()); - ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes, - request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator, - request.includeDiskInfo() ? 
clusterInfo : null); - listener.onResponse(new ClusterAllocationExplainResponse(cae)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } - - private void getShardStores(ShardRouting shard, final ActionListener listener) { - IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName()); - request.shardStatuses("all"); - shardStoresAction.execute(request, listener); + return foundShard; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index e4a575dcf79..a9a2c36970e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -24,19 +24,19 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.cluster.health.ClusterStateHealth; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.StatusToXContent; +import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.Locale; import java.util.Map; -public class ClusterHealthResponse extends ActionResponse implements StatusToXContent { +public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject { private String clusterName; private int numberOfPendingTasks = 0; private int numberOfInFlightFetch = 0; @@ -200,18 +200,9 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo taskMaxWaitingTime.writeTo(out); } - @Override public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } + return Strings.toString(this); } @Override @@ -240,6 +231,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.field(CLUSTER_NAME, getClusterName()); builder.field(STATUS, getStatus().name().toLowerCase(Locale.ROOT)); builder.field(TIMED_OUT, isTimedOut()); @@ -268,6 +260,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo } builder.endObject(); } + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 7dd2c0df84c..44c604dc8b8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -26,6 +26,7 @@ import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -85,37 +86,55 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { if (request.waitForEvents() != null) { final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } + if (request.local()) { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new LocalClusterUpdateTask(request.waitForEvents()) { + @Override + public ClusterTasksResult execute(ClusterState currentState) { + return unchanged(); + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public void onNoLongerMaster(String source) { - logger.trace("stopped being master while waiting for events with priority [{}]. 
retrying.", request.waitForEvents()); - doExecute(task, request, listener); - } + @Override + public void onFailure(String source, Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } else { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public boolean runOnlyOnMaster() { - return !request.local(); - } - }); + @Override + public void onNoLongerMaster(String source) { + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); + doExecute(task, request, listener); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } } else { executeHealth(request, listener); } @@ -141,8 +160,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< } assert waitFor >= 0; - final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext()); - final ClusterState state = observer.observedState(); + final ClusterState state = clusterService.state(); + final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext()); if (request.timeout().millis() == 0) { listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); return; @@ -163,8 +182,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< @Override public void onTimeout(TimeValue timeout) { - final ClusterState clusterState = clusterService.state(); - final ClusterHealthResponse response = getResponse(request, clusterState, concreteWaitFor, true); + final ClusterHealthResponse response = getResponse(request, observer.setAndGetObservedState(), concreteWaitFor, true); listener.onResponse(response); } }; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index c26554b25e0..7d80b84d5d2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.NodeService; 
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index b4cef38d28d..e4034582f96 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index ce5d92753a8..50786d36766 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -19,21 +19,21 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -49,9 +49,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; /** @@ -116,18 +114,16 @@ public class TransportCancelTasksAction extends TransportTasksAction listener) { - final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes)); - Set childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished); - if (childNodes != null) { - if (childNodes.isEmpty()) { - // The task has no child tasks, so we can return immediately - logger.trace("cancelling task {} with no children", cancellableTask.getId()); - listener.onResponse(cancellableTask.taskInfo(clusterService.localNode().getId(), false)); - } 
else { - // The task has some child tasks, we need to wait for until ban is set on all nodes - logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes); - String nodeId = clusterService.localNode().getId(); - AtomicInteger responses = new AtomicInteger(childNodes.size()); + String nodeId = clusterService.localNode().getId(); + final boolean canceled; + if (cancellableTask.shouldCancelChildrenOnCancellation()) { + DiscoveryNodes childNodes = clusterService.state().nodes(); + final BanLock banLock = new BanLock(childNodes.getSize(), () -> removeBanOnNodes(cancellableTask, childNodes)); + canceled = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished); + if (canceled) { + // In case the task has some child tasks, we need to wait until the ban is set on all nodes + logger.trace("cancelling task {} on child nodes", cancellableTask.getId()); + AtomicInteger responses = new AtomicInteger(childNodes.getSize()); List failures = new ArrayList<>(); setBanOnNodes(request.getReason(), cancellableTask, childNodes, new ActionListener() { @Override @@ -157,83 +153,74 @@ public class TransportCancelTasksAction extends TransportTasksAction listener.onResponse(cancellableTask.taskInfo(nodeId, false))); + if (canceled) { + logger.trace("task {} doesn't have any children that should be cancelled", cancellableTask.getId()); + } + } + if (canceled == false) { logger.trace("task {} is already cancelled", cancellableTask.getId()); throw new IllegalStateException("task with id " + cancellableTask.getId() + " is already cancelled"); } } + @Override protected boolean accumulateExceptions() { return true; } - private void setBanOnNodes(String reason, CancellableTask task, Set nodes, ActionListener listener) { + private void setBanOnNodes(String reason, CancellableTask task, DiscoveryNodes nodes, ActionListener listener) { sendSetBanRequest(nodes, BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()), reason), listener); } - private void removeBanOnNodes(CancellableTask task, Set nodes) { + private void removeBanOnNodes(CancellableTask task, DiscoveryNodes nodes) { sendRemoveBanRequest(nodes, BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()))); } - private void sendSetBanRequest(Set nodes, BanParentTaskRequest request, ActionListener listener) { - ClusterState clusterState = clusterService.state(); - for (String node : nodes) { - DiscoveryNode discoveryNode = clusterState.getNodes().get(node); - if (discoveryNode != null) { - // Check if node still in the cluster - logger.trace("Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]", request.parentTaskId, node, - request.ban); - transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - listener.onResponse(null); - } + private void sendSetBanRequest(DiscoveryNodes nodes, BanParentTaskRequest request, ActionListener listener) { + for (ObjectObjectCursor node : nodes.getNodes()) { + logger.trace("Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]", request.parentTaskId, node.key, + request.ban); + transportService.sendRequest(node.value, BAN_PARENT_ACTION_NAME, request, + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void
handleResponse(TransportResponse.Empty response) { + listener.onResponse(null); + } - @Override - public void handleException(TransportException exp) { - logger.warn("Cannot send ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); - listener.onFailure(exp); - } - }); - } else { - listener.onResponse(null); - logger.debug("Cannot send ban for tasks with the parent [{}] to the node [{}] - the node no longer in the cluster", - request.parentTaskId, node); - } + @Override + public void handleException(TransportException exp) { + logger.warn("Cannot send ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node.key); + listener.onFailure(exp); + } + }); } } - private void sendRemoveBanRequest(Set nodes, BanParentTaskRequest request) { - ClusterState clusterState = clusterService.state(); - for (String node : nodes) { - DiscoveryNode discoveryNode = clusterState.getNodes().get(node); - if (discoveryNode != null) { - // Check if node still in the cluster - logger.debug("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); - transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler - .INSTANCE_SAME); - } else { - logger.debug("Cannot send remove ban request for tasks with the parent [{}] to the node [{}] - the node no longer in " + - "the cluster", request.parentTaskId, node); - } + private void sendRemoveBanRequest(DiscoveryNodes nodes, BanParentTaskRequest request) { + for (ObjectObjectCursor node : nodes.getNodes()) { + logger.debug("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node.key); + transportService.sendRequest(node.value, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler + .INSTANCE_SAME); } } private static class BanLock { - private final Consumer> finish; + private final Runnable finish; private final AtomicInteger counter; - private final AtomicReference> nodes = new AtomicReference<>(); + private final int nodesSize; - public BanLock(Consumer> finish) { + BanLock(int nodesSize, Runnable finish) { counter = new AtomicInteger(0); this.finish = finish; + this.nodesSize = nodesSize; } public void onBanSet() { @@ -242,15 +229,14 @@ public class TransportCancelTasksAction extends TransportTasksAction nodes) { - this.nodes.set(nodes); - if (counter.addAndGet(nodes.size()) == 0) { + public void onTaskFinished() { + if (counter.addAndGet(nodesSize) == 0) { finish(); } } public void finish() { - finish.accept(nodes.get()); + finish.run(); } } @@ -282,7 +268,7 @@ public class TransportCancelTasksAction extends TransportTasksAction() { @Override @@ -246,8 +248,8 @@ public class TransportGetTaskAction extends HandledTransportAction ParseFieldMatcher.STRICT); + try (XContentParser parser = XContentHelper.createParser(xContentRegistry, response.getSourceAsBytesRef())) { + TaskResult result = TaskResult.PARSER.apply(parser, null); listener.onResponse(new GetTaskResponse(result)); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index b33226b973b..a203dd35b47 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -27,7 +27,7 @@ import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -43,7 +43,7 @@ import java.util.stream.Collectors; /** * Returns the list of tasks currently running on the nodes */ -public class ListTasksResponse extends BaseTasksResponse implements ToXContent { +public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { private List tasks; @@ -161,8 +161,9 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { } builder.startObject("tasks"); for(TaskInfo task : entry.getValue()) { - builder.field(task.getTaskId().toString()); + builder.startObject(task.getTaskId().toString()); task.toXContent(builder, params); + builder.endObject(); } builder.endObject(); builder.endObject(); @@ -187,7 +188,10 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContentGroupedByParents(builder, params); + builder.startObject(); + toXContentGroupedByParents(builder, params); + builder.endObject(); + return builder; } private void toXContentCommon(XContentBuilder builder, Params params) throws IOException { @@ -214,6 +218,6 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { @Override public String toString() { - return Strings.toString(this, true); + return Strings.toString(this); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java index b254137163d..87bf70acede 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -81,7 +81,7 @@ public class TaskGroup implements ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - task.innerToXContent(builder, params); + task.toXContent(builder, params); if (childTasks.isEmpty() == false) { builder.startArray("children"); for (TaskGroup taskGroup : childTasks) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index a06175a598b..f0f8d50b4c1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -142,14 +142,28 @@ public class PutRepositoryRequest extends AcknowledgedRequest indicesAndFilters) { + public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes, + Map indicesAndFilters) { this.groups = groups; this.nodes = nodes; this.indicesAndFilters = indicesAndFilters; @@ -104,6 +105,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo @Override public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startObject("nodes"); for (DiscoveryNode node : nodes) { node.toXContent(builder, params); @@ -129,7 +131,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo group.toXContent(builder, params); } builder.endArray(); + builder.endObject(); return builder; } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 9cbc1b65632..3267b6d9c96 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -42,7 +42,7 @@ import static org.elasticsearch.common.Strings.EMPTY_ARRAY; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Create snapshot request @@ -288,18 +288,34 @@ public class CreateSnapshotRequest extends MasterNodeRequest * See repository documentation for more information. * * @param source repository-specific snapshot settings * @return this request + * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection */ + @Deprecated public CreateSnapshotRequest settings(String source) { this.settings = Settings.builder().loadFromSource(source).build(); return this; } + /** + * Sets repository-specific snapshot settings in JSON or YAML format + *
<p>
+ * See repository documentation for more information. + * + * @param source repository-specific snapshot settings + * @param xContentType the content type of the source + * @return this request + */ + public CreateSnapshotRequest settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + /** * Sets repository-specific snapshot settings. *
<p>
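The new overload threads the content type through explicitly instead of auto-detecting it from the raw string. A minimal usage sketch (the repository name, snapshot name, and settings key are hypothetical; the two-argument CreateSnapshotRequest constructor is assumed):

    import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
    import org.elasticsearch.common.xcontent.XContentType;

    public class CreateSnapshotSettingsExample {
        public static CreateSnapshotRequest jsonSettings() {
            // The content type is stated up front, so no detection runs on the payload.
            return new CreateSnapshotRequest("my_repository", "snapshot_1")
                    .settings("{\"example_setting\": \"example_value\"}", XContentType.JSON);
        }
    }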
@@ -312,7 +328,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest) entry.getValue()); } else if (name.equals("include_global_state")) { - includeGlobalState = lenientNodeBooleanValue(entry.getValue()); + includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state"); } } indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index ebdd206b5c3..d3b5e12351c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; import java.util.Map; @@ -147,12 +148,28 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil * * @param source repository-specific snapshot settings * @return this builder + * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection */ + @Deprecated public CreateSnapshotRequestBuilder setSettings(String source) { request.settings(source); return this; } + /** + * Sets repository-specific snapshot settings in YAML or JSON format + *
<p>
+ * See repository documentation for more information. + * + * @param source repository-specific snapshot settings + * @param xContentType the content type of the source + * @return this builder + */ + public CreateSnapshotRequestBuilder setSettings(String source, XContentType xContentType) { + request.settings(source, xContentType); + return this; + } + /** * Sets repository-specific snapshot settings. *
<p>
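The request builder exposes the same explicit-content-type variant; a sketch, assuming client is a connected org.elasticsearch.client.Client (all names illustrative):

    // prepareCreateSnapshot returns a CreateSnapshotRequestBuilder, so the new
    // setSettings(String, XContentType) overload chains like any other setter.
    client.admin().cluster().prepareCreateSnapshot("my_repository", "snapshot_1")
            .setWaitForCompletion(true)
            .setSettings("{\"example_setting\": \"example_value\"}", XContentType.JSON)
            .get();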
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index efc2fbeb5b5..1f9f77f9ed3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotInfo; @@ -33,7 +33,7 @@ import java.io.IOException; /** * Create snapshot response */ -public class CreateSnapshotResponse extends ActionResponse implements ToXContent { +public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { @Nullable private SnapshotInfo snapshotInfo; @@ -83,12 +83,14 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); if (snapshotInfo != null) { builder.field("snapshot"); snapshotInfo.toXContent(builder, params); } else { builder.field("accepted", true); } + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 3c37d1870e5..5cce5482ec5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -75,6 +75,6 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction snapshots = Collections.emptyList(); @@ -76,11 +77,13 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); builder.startArray("snapshots"); for (SnapshotInfo snapshotInfo : snapshots) { snapshotInfo.toXContent(builder, params); } builder.endArray(); + builder.endObject(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 573bb0ea263..ad8541ce9fd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -88,7 +88,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction * See repository documentation for more information. 
* * @param source repository-specific snapshot settings * @return this request + * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection */ + @Deprecated public RestoreSnapshotRequest settings(String source) { this.settings = Settings.builder().loadFromSource(source).build(); return this; } + /** + * Sets repository-specific restore settings in JSON or YAML format + *
<p>
+ * See repository documentation for more information. + * + * @param source repository-specific restore settings + * @param xContentType the content type of the source + * @return this request + */ + public RestoreSnapshotRequest settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + /** * Sets repository-specific restore settings *
<p>
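RestoreSnapshotRequest gains the matching overload; a short sketch (the two-argument constructor and the settings key are assumed for illustration):

    RestoreSnapshotRequest request = new RestoreSnapshotRequest("my_repository", "snapshot_1")
            .settings("{\"example_setting\": \"example_value\"}", XContentType.JSON);

The hunk below also tightens source-map parsing: include_global_state and include_aliases now go through nodeBooleanValue, which rejects values it cannot parse as a strict boolean instead of coercing them leniently.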
@@ -337,7 +353,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest) entry.getValue()); } else if (name.equals("include_global_state")) { - includeGlobalState = lenientNodeBooleanValue(entry.getValue()); + includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state"); } else if (name.equals("include_aliases")) { - includeAliases = lenientNodeBooleanValue(entry.getValue()); + includeAliases = nodeBooleanValue(entry.getValue(), "include_aliases"); } else if (name.equals("rename_pattern")) { if (entry.getValue() instanceof String) { renamePattern((String) entry.getValue()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 661a1a1d018..807e2387243 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; import java.util.List; import java.util.Map; @@ -153,18 +154,34 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui } /** - * Sets repository-specific restore settings in JSON, YAML or properties format + * Sets repository-specific restore settings in JSON or YAML format *
<p>
* See repository documentation for more information. * * @param source repository-specific snapshot settings * @return this builder + * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection */ + @Deprecated public RestoreSnapshotRequestBuilder setSettings(String source) { request.settings(source); return this; } + /** + * Sets repository-specific restore settings in JSON or YAML format + *
<p>
+ * See repository documentation for more information. + * + * @param source repository-specific restore settings + * @param xContentType the content type of the source + * @return this builder + */ + public RestoreSnapshotRequestBuilder setSettings(String source, XContentType xContentType) { + request.settings(source, xContentType); + return this; + } + /** * Sets repository-specific restore settings *
<p>
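The next hunk adds the same explicit-content-type variant for per-index setting overrides; combined, a restore call could look like this sketch (names illustrative, client assumed connected):

    client.admin().cluster().prepareRestoreSnapshot("my_repository", "snapshot_1")
            .setWaitForCompletion(true)
            // Override index settings on restore, stating the payload type explicitly.
            .setIndexSettings("{\"index.number_of_replicas\": 0}", XContentType.JSON)
            .get();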
@@ -251,12 +268,26 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui * * @param source index settings * @return this builder + * @deprecated use {@link #setIndexSettings(String, XContentType)} to avoid content type detection */ + @Deprecated public RestoreSnapshotRequestBuilder setIndexSettings(String source) { request.indexSettings(source); return this; } + /** + * Sets index settings that should be added or replaced during restore + * + * @param source index settings + * @param xContentType the content type of the source + * @return this builder + */ + public RestoreSnapshotRequestBuilder setIndexSettings(String source, XContentType xContentType) { + request.indexSettings(source, xContentType); + return this; + } + /** * Sets index settings that should be added or replaced during restore * diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index 70f4f2aa4f2..5a02e4bcb13 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.RestoreInfo; @@ -33,7 +34,7 @@ import java.io.IOException; /** * Contains information about restores snapshot */ -public class RestoreSnapshotResponse extends ActionResponse implements ToXContent { +public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject { @Nullable private RestoreInfo restoreInfo; @@ -75,12 +76,14 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); if (restoreInfo != null) { builder.field("snapshot"); restoreInfo.toXContent(builder, params); } else { builder.field("accepted", true); } + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index d96daa86f76..c523fbbac3b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -47,7 +47,7 @@ public enum SnapshotIndexShardStage { private boolean completed; - private SnapshotIndexShardStage(byte value, boolean completed) { + SnapshotIndexShardStage(byte value, boolean completed) { this.value = value; this.completed = completed; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index b9800a2d9ed..d44a490680c 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -33,7 +33,7 @@ import java.util.List; /** * Snapshot status response */ -public class SnapshotsStatusResponse extends ActionResponse implements ToXContent { +public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject { private List snapshots = Collections.emptyList(); @@ -75,11 +75,13 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startArray("snapshots"); for (SnapshotStatus snapshot : snapshots) { snapshot.toXContent(builder, params); } builder.endArray(); + builder.endObject(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index c73ae48d070..7406b0fea4a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -36,7 +36,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotMissingException; @@ -201,7 +203,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction 0) { final Set requestedSnapshotNames = Sets.newHashSet(request.snapshots()); - final Map matchedSnapshotIds = snapshotsService.snapshotIds(repositoryName).stream() + final RepositoryData repositoryData = snapshotsService.getRepositoryData(repositoryName); + final Map matchedSnapshotIds = repositoryData.getAllSnapshotIds().stream() .filter(s -> requestedSnapshotNames.contains(s.getName())) .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); for (final String snapshotName : request.snapshots()) { @@ -220,6 +223,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = new ArrayList<>(); @@ -243,7 +248,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction { private String id; - private String scriptLang; + private String lang; DeleteStoredScriptRequest() { + super(); } - public DeleteStoredScriptRequest(String scriptLang, String id) { - this.scriptLang = scriptLang; + public DeleteStoredScriptRequest(String id, String lang) { + 
super(); + this.id = id; + this.lang = lang; } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (id == null) { - validationException = addValidationError("id is missing", validationException); + + if (id == null || id.isEmpty()) { + validationException = addValidationError("must specify id for stored script", validationException); } else if (id.contains("#")) { - validationException = addValidationError("id can't contain: '#'", validationException); + validationException = addValidationError("id cannot contain '#' for stored script", validationException); } - if (scriptLang == null) { - validationException = addValidationError("lang is missing", validationException); - } else if (scriptLang.contains("#")) { - validationException = addValidationError("lang can't contain: '#'", validationException); + + if (lang != null && lang.contains("#")) { + validationException = addValidationError("lang cannot contain '#' for stored script", validationException); } + return validationException; } - public String scriptLang() { - return scriptLang; - } - - public DeleteStoredScriptRequest scriptLang(String type) { - this.scriptLang = type; - return this; - } - public String id() { return id; } public DeleteStoredScriptRequest id(String id) { this.id = id; + + return this; + } + + public String lang() { + return lang; + } + + public DeleteStoredScriptRequest lang(String lang) { + this.lang = lang; + return this; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - scriptLang = in.readString(); + + lang = in.readString(); + + if (lang.isEmpty()) { + lang = null; + } + id = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(scriptLang); + + out.writeString(lang == null ? "" : lang); out.writeString(id); } @Override public String toString() { - return "delete script {[" + scriptLang + "][" + id + "]}"; + return "delete stored script {id [" + id + "]" + (lang != null ? 
", lang [" + lang + "]" : "") + "}"; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index caf55a03f18..8a65506dabd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -29,13 +29,15 @@ public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder super(client, action, new DeleteStoredScriptRequest()); } - public DeleteStoredScriptRequestBuilder setScriptLang(String scriptLang) { - request.scriptLang(scriptLang); + public DeleteStoredScriptRequestBuilder setLang(String lang) { + request.lang(lang); + return this; } public DeleteStoredScriptRequestBuilder setId(String id) { request.id(id); + return this; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index bb7a9effd32..2bfd547362c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -28,61 +28,79 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class GetStoredScriptRequest extends MasterNodeReadRequest { protected String id; protected String lang; GetStoredScriptRequest() { + super(); } - public GetStoredScriptRequest(String lang, String id) { - this.lang = lang; + public GetStoredScriptRequest(String id, String lang) { + super(); + this.id = id; + this.lang = lang; } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (lang == null) { - validationException = ValidateActions.addValidationError("lang is missing", validationException); + + if (id == null || id.isEmpty()) { + validationException = addValidationError("must specify id for stored script", validationException); + } else if (id.contains("#")) { + validationException = addValidationError("id cannot contain '#' for stored script", validationException); } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); + + if (lang != null && lang.contains("#")) { + validationException = addValidationError("lang cannot contain '#' for stored script", validationException); } + return validationException; } - public GetStoredScriptRequest lang(@Nullable String type) { - this.lang = type; - return this; - } - - public GetStoredScriptRequest id(String id) { - this.id = id; - return this; - } - - - public String lang() { - return lang; - } - public String id() { return id; } + public GetStoredScriptRequest id(String id) { + this.id = id; + + return this; + } + + public String lang() { + return lang; + } + + public GetStoredScriptRequest lang(String lang) { + this.lang = lang; + + return this; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); + lang = in.readString(); + + if (lang.isEmpty()) { + lang = null; + } + id = in.readString(); } @Override public void 
writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(lang); + + out.writeString(lang == null ? "" : lang); out.writeString(id); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 36dd9beb38a..b8302a03c28 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -19,49 +19,70 @@ package org.elasticsearch.action.admin.cluster.storedscripts; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.script.Script; +import org.elasticsearch.script.StoredScriptSource; import java.io.IOException; public class GetStoredScriptResponse extends ActionResponse implements ToXContent { - private String storedScript; + private StoredScriptSource source; GetStoredScriptResponse() { } - GetStoredScriptResponse(String storedScript) { - this.storedScript = storedScript; + GetStoredScriptResponse(StoredScriptSource source) { + this.source = source; } /** * @return if a stored script and if not found null */ - public String getStoredScript() { - return storedScript; + public StoredScriptSource getSource() { + return source; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.value(storedScript); + source.toXContent(builder, params); + return builder; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - storedScript = in.readOptionalString(); + + if (in.readBoolean()) { + if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) { + source = new StoredScriptSource(in); + } else { + source = new StoredScriptSource(in.readString()); + } + } else { + source = null; + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalString(storedScript); + + if (source == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + + if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) { + source.writeTo(out); + } else { + out.writeString(source.getCode()); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index cfe153d7d96..28c70dc45b8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -19,108 +19,153 @@ package org.elasticsearch.action.admin.cluster.storedscripts; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import 
org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; public class PutStoredScriptRequest extends AcknowledgedRequest { private String id; - private String scriptLang; - private BytesReference script; + private String lang; + private BytesReference content; + private XContentType xContentType; public PutStoredScriptRequest() { super(); } - public PutStoredScriptRequest(String scriptLang) { - super(); - this.scriptLang = scriptLang; + @Deprecated + public PutStoredScriptRequest(String id, String lang, BytesReference content) { + this(id, lang, content, XContentFactory.xContentType(content)); } - public PutStoredScriptRequest(String scriptLang, String id) { + public PutStoredScriptRequest(String id, String lang, BytesReference content, XContentType xContentType) { super(); - this.scriptLang = scriptLang; this.id = id; + this.lang = lang; + this.content = content; + this.xContentType = Objects.requireNonNull(xContentType); } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (id == null) { - validationException = addValidationError("id is missing", validationException); + + if (id == null || id.isEmpty()) { + validationException = addValidationError("must specify id for stored script", validationException); } else if (id.contains("#")) { - validationException = addValidationError("id can't contain: '#'", validationException); + validationException = addValidationError("id cannot contain '#' for stored script", validationException); } - if (scriptLang == null) { - validationException = addValidationError("lang is missing", validationException); - } else if (scriptLang.contains("#")) { - validationException = addValidationError("lang can't contain: '#'", validationException); + + if (lang != null && lang.contains("#")) { + validationException = addValidationError("lang cannot contain '#' for stored script", validationException); } - if (script == null) { - validationException = addValidationError("script is missing", validationException); + + if (content == null) { + validationException = addValidationError("must specify code for stored script", validationException); } + return validationException; } - public String scriptLang() { - return scriptLang; - } - - public PutStoredScriptRequest scriptLang(String scriptLang) { - this.scriptLang = scriptLang; - return this; - } - public String id() { return id; } public PutStoredScriptRequest id(String id) { this.id = id; + return this; } - public BytesReference script() { - return script; + public String lang() { + return lang; } - public PutStoredScriptRequest script(BytesReference source) { - this.script = source; + public PutStoredScriptRequest lang(String lang) { + this.lang = lang; + + return this; + } + + public BytesReference content() { + return content; + } + + public XContentType xContentType() { + return xContentType; + } + + /** + * Set the script source using bytes. + * @deprecated this method is deprecated as it relies on content type detection. Use {@link #content(BytesReference, XContentType)} + */ + @Deprecated + public PutStoredScriptRequest content(BytesReference content) { + return content(content, XContentFactory.xContentType(content)); + } + + /** + * Set the script source and the content type of the bytes. 
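// Assumed usage sketch for the PutStoredScriptRequest API above: the id must
// be non-empty and free of '#', lang is optional (but also may not contain
// '#'), and the content type is now passed explicitly rather than detected.
// The script id, lang, and body are illustrative.
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

class PutStoredScriptExample {
    static PutStoredScriptRequest build() {
        return new PutStoredScriptRequest(
            "calculate-score",                                      // id
            "painless",                                             // lang (may be null)
            new BytesArray("{\"script\": \"Math.log(_score * 2)\"}"),
            XContentType.JSON);                                     // explicit, no detection
    }
}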
+ */ + public PutStoredScriptRequest content(BytesReference content, XContentType xContentType) { + this.content = content; + this.xContentType = Objects.requireNonNull(xContentType); return this; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - scriptLang = in.readString(); + + lang = in.readString(); + + if (lang.isEmpty()) { + lang = null; + } + id = in.readOptionalString(); - script = in.readBytesReference(); + content = in.readBytesReference(); + if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType = XContentType.readFrom(in); + } else { + xContentType = XContentFactory.xContentType(content); + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(scriptLang); + + out.writeString(lang == null ? "" : lang); out.writeOptionalString(id); - out.writeBytesReference(script); + out.writeBytesReference(content); + if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType.writeTo(out); + } } @Override public String toString() { - String sSource = "_na_"; + String source = "_na_"; + try { - sSource = XContentHelper.convertToJson(script, false); + source = XContentHelper.convertToJson(content, false, xContentType); } catch (Exception e) { // ignore } - return "put script {[" + id + "][" + scriptLang + "], script[" + sSource + "]}"; + + return "put stored script {id [" + id + "]" + (lang != null ? ", lang [" + lang + "]" : "") + ", content [" + source + "]}"; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 15c51c2ccd7..f8223d69199 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder { @@ -30,19 +31,31 @@ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder listener) throws Exception { - scriptService.storeScript(clusterService, request, listener); + scriptService.putStoredScript(clusterService, request, listener); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index bb1afe5e19e..ec42e34ec96 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -31,7 +31,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContent { +public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContentObject { private List pendingTasks; @@ -63,13 +63,15 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera StringBuilder sb = new StringBuilder(); sb.append("tasks: (").append(pendingTasks.size()).append("):\n"); for (PendingClusterTask pendingClusterTask : this) { - sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/").append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n"); + sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/") + .append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n"); } return sb.toString(); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startArray(Fields.TASKS); for (PendingClusterTask pendingClusterTask : this) { builder.startObject(); @@ -82,6 +84,7 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera builder.endObject(); } builder.endArray(); + builder.endObject(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 524a21ec632..e97facd748f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -42,6 +41,7 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; @@ -118,8 +118,8 @@ public class IndicesAliasesRequest extends AcknowledgedRequest parser(String name, Supplier supplier) { - ObjectParser parser = new ObjectParser<>(name, supplier); + private static ObjectParser parser(String name, Supplier supplier) { + ObjectParser parser = new ObjectParser<>(name, supplier); parser.declareString((action, index) -> { if (action.indices() != null) { throw new IllegalArgumentException("Only one of [index] and [indices] is supported"); @@ -147,7 +147,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest ADD_PARSER = parser("add", AliasActions::add); + private static final ObjectParser ADD_PARSER 
= parser("add", AliasActions::add); static { ADD_PARSER.declareObject(AliasActions::filter, (parser, m) -> { try { @@ -157,18 +157,17 @@ public class IndicesAliasesRequest extends AcknowledgedRequest p.text(), new ParseField("routing"), ValueType.INT); - ADD_PARSER.declareField(AliasActions::indexRouting, p -> p.text(), new ParseField("index_routing"), ValueType.INT); - ADD_PARSER.declareField(AliasActions::searchRouting, p -> p.text(), new ParseField("search_routing"), ValueType.INT); + ADD_PARSER.declareField(AliasActions::routing, XContentParser::text, new ParseField("routing"), ValueType.INT); + ADD_PARSER.declareField(AliasActions::indexRouting, XContentParser::text, new ParseField("index_routing"), ValueType.INT); + ADD_PARSER.declareField(AliasActions::searchRouting, XContentParser::text, new ParseField("search_routing"), ValueType.INT); } - private static final ObjectParser REMOVE_PARSER = parser("remove", AliasActions::remove); - private static final ObjectParser REMOVE_INDEX_PARSER = parser("remove_index", - AliasActions::removeIndex); + private static final ObjectParser REMOVE_PARSER = parser("remove", AliasActions::remove); + private static final ObjectParser REMOVE_INDEX_PARSER = parser("remove_index", AliasActions::removeIndex); /** * Parser for any one {@link AliasAction}. */ - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "alias_action", a -> { // Take the first action and complain if there are more than one actions AliasActions action = null; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 85282788898..5a63ce8d869 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -57,7 +57,7 @@ public class GetAliasesResponse extends ActionResponse { int valueSize = in.readVInt(); List value = new ArrayList<>(valueSize); for (int j = 0; j < valueSize; j++) { - value.add(AliasMetaData.Builder.readFrom(in)); + value.add(new AliasMetaData(in)); } aliasesBuilder.put(key, Collections.unmodifiableList(value)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java index 6d0824eeb31..08f220e0199 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java @@ -75,7 +75,7 @@ public class AnalyzeRequest extends SingleShardRequest { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(definition); - this.definition = Settings.builder().loadFromSource(builder.string()).build(); + this.definition = Settings.builder().loadFromSource(builder.string(), builder.contentType()).build(); } catch (IOException e) { throw new IllegalArgumentException("Failed to parse [" + definition + "]", e); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index 344681b997e..5070862ed69 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -113,7 +113,7 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder, ToXContent { +public class AnalyzeResponse extends ActionResponse implements Iterable, ToXContentObject { - public static class AnalyzeToken implements Streamable, ToXContent { + public static class AnalyzeToken implements Streamable, ToXContentObject { private String term; private int startOffset; private int endOffset; private int position; + private int positionLength = 1; private Map attributes; private String type; AnalyzeToken() { } - public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type, - Map attributes) { + public AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, + String type, Map attributes) { this.term = term; this.position = position; this.startOffset = startOffset; this.endOffset = endOffset; + this.positionLength = positionLength; this.type = type; this.attributes = attributes; } @@ -71,6 +73,10 @@ public class AnalyzeResponse extends ActionResponse implements Iterable 1) { + builder.field(Fields.POSITION_LENGTH, positionLength); + } if (attributes != null && !attributes.isEmpty()) { for (Map.Entry entity : attributes.entrySet()) { builder.field(entity.getKey(), entity.getValue()); @@ -108,6 +117,14 @@ public class AnalyzeResponse extends ActionResponse implements Iterable) in.readGenericValue(); @@ -120,6 +137,9 @@ public class AnalyzeResponse extends ActionResponse implements Iterable 1 ? positionLength : null); + } out.writeOptionalString(type); if (out.getVersion().onOrAfter(Version.V_2_2_0)) { out.writeGenericValue(attributes); @@ -154,6 +174,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable 0) { lastPosition = lastPosition + increment; } - tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), type.type(), null)); + tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), + lastOffset + offset.endOffset(), posLen.getPositionLength(), type.type(), null)); } stream.end(); @@ -381,6 +384,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction } /** - * The settings to create the index with (either json/yaml/properties format) + * The settings to create the index with (either json or yaml format) + * @deprecated use {@link #source(String, XContentType)} instead to avoid content type detection */ + @Deprecated public CreateIndexRequest settings(String source) { this.settings = Settings.builder().loadFromSource(source).build(); return this; } + /** + * The settings to create the index with (either json or yaml format) + */ + public CreateIndexRequest settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + /** * Allows to set the settings using a json builder. 
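// Assumed sketch of the settings(...) overloads above: an XContentBuilder
// carries its own content type, and the String overload now takes an explicit
// XContentType; only the bare String variant is deprecated. The index name
// and settings body are illustrative.
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class CreateIndexSettingsExample {
    static CreateIndexRequest build() {
        return new CreateIndexRequest("my_index")
            .settings("{\"index\": {\"number_of_shards\": 1}}", XContentType.JSON);
    }
}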
*/ public CreateIndexRequest settings(XContentBuilder builder) { try { - settings(builder.string()); + settings(builder.string(), builder.contentType()); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate json settings from builder", e); } @@ -195,7 +208,7 @@ public class CreateIndexRequest extends AcknowledgedRequest try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(source); - settings(builder.string()); + settings(builder.string(), XContentType.JSON); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } @@ -207,13 +220,42 @@ public class CreateIndexRequest extends AcknowledgedRequest * * @param type The mapping type * @param source The mapping source + * @deprecated use {@link #mapping(String, String, XContentType)} to avoid content type detection */ + @Deprecated public CreateIndexRequest mapping(String type, String source) { + return mapping(type, new BytesArray(source), XContentFactory.xContentType(source)); + } + + /** + * Adds mapping that will be added when the index gets created. + * + * @param type The mapping type + * @param source The mapping source + * @param xContentType The content type of the source + */ + public CreateIndexRequest mapping(String type, String source, XContentType xContentType) { + return mapping(type, new BytesArray(source), xContentType); + } + + /** + * Adds mapping that will be added when the index gets created. + * + * @param type The mapping type + * @param source The mapping source + * @param xContentType the content type of the mapping source + */ + private CreateIndexRequest mapping(String type, BytesReference source, XContentType xContentType) { if (mappings.containsKey(type)) { throw new IllegalStateException("mappings for type \"" + type + "\" were already defined"); } - mappings.put(type, source); - return this; + Objects.requireNonNull(xContentType); + try { + mappings.put(type, XContentHelper.convertToJson(source, false, false, xContentType)); + return this; + } catch (IOException e) { + throw new UncheckedIOException("failed to convert to json", e); + } } /** @@ -231,15 +273,7 @@ public class CreateIndexRequest extends AcknowledgedRequest * @param source The mapping source */ public CreateIndexRequest mapping(String type, XContentBuilder source) { - if (mappings.containsKey(type)) { - throw new IllegalStateException("mappings for type \"" + type + "\" were already defined"); - } - try { - mappings.put(type, source.string()); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to build json for mapping request", e); - } - return this; + return mapping(type, source.bytes(), source.contentType()); } /** @@ -260,7 +294,7 @@ public class CreateIndexRequest extends AcknowledgedRequest try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(source); - return mapping(type, builder.string()); + return mapping(type, builder); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } @@ -307,7 +341,8 @@ public class CreateIndexRequest extends AcknowledgedRequest * Sets the aliases that will be associated with the index when it gets created */ public CreateIndexRequest aliases(BytesReference source) { - try (XContentParser parser = XContentHelper.createParser(source)) { + // EMPTY is safe here because we never call namedObject + try (XContentParser parser = 
XContentHelper.createParser(NamedXContentRegistry.EMPTY, source)) { //move to the first alias parser.nextToken(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -329,9 +364,18 @@ public class CreateIndexRequest extends AcknowledgedRequest /** * Sets the settings and mappings as a single source. + * @deprecated use {@link #source(String, XContentType)} */ + @Deprecated public CreateIndexRequest source(String source) { - return source(source.getBytes(StandardCharsets.UTF_8)); + return source(new BytesArray(source)); + } + + /** + * Sets the settings and mappings as a single source. + */ + public CreateIndexRequest source(String source, XContentType xContentType) { + return source(new BytesArray(source), xContentType); } /** @@ -343,7 +387,9 @@ public class CreateIndexRequest extends AcknowledgedRequest /** * Sets the settings and mappings as a single source. + * @deprecated use {@link #source(byte[], XContentType)} */ + @Deprecated public CreateIndexRequest source(byte[] source) { return source(source, 0, source.length); } @@ -351,6 +397,15 @@ public class CreateIndexRequest extends AcknowledgedRequest /** * Sets the settings and mappings as a single source. */ + public CreateIndexRequest source(byte[] source, XContentType xContentType) { + return source(source, 0, source.length, xContentType); + } + + /** + * Sets the settings and mappings as a single source. + * @deprecated use {@link #source(byte[], int, int, XContentType)} + */ + @Deprecated public CreateIndexRequest source(byte[] source, int offset, int length) { return source(new BytesArray(source, offset, length)); } @@ -358,17 +413,27 @@ public class CreateIndexRequest extends AcknowledgedRequest /** * Sets the settings and mappings as a single source. */ + public CreateIndexRequest source(byte[] source, int offset, int length, XContentType xContentType) { + return source(new BytesArray(source, offset, length), xContentType); + } + + /** + * Sets the settings and mappings as a single source. + * @deprecated use {@link #source(BytesReference, XContentType)} + */ + @Deprecated public CreateIndexRequest source(BytesReference source) { XContentType xContentType = XContentFactory.xContentType(source); - if (xContentType != null) { - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) { - source(parser.map()); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse source for create index", e); - } - } else { - settings(source.utf8ToString()); - } + source(source, xContentType); + return this; + } + + /** + * Sets the settings and mappings as a single source. 
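// Assumed sketch for the combined source(...) API above: settings and
// mappings supplied as one JSON document, with the content type stated
// explicitly instead of detected. Names and bodies are illustrative.
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class CreateIndexSourceExample {
    static CreateIndexRequest build() {
        String source = "{"
            + "\"settings\": {\"index\": {\"number_of_shards\": 1}},"
            + "\"mappings\": {\"my_type\": {\"properties\": {\"title\": {\"type\": \"text\"}}}}"
            + "}";
        return new CreateIndexRequest("my_index").source(source, XContentType.JSON);
    }
}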
+ */ + public CreateIndexRequest source(BytesReference source, XContentType xContentType) { + Objects.requireNonNull(xContentType); + source(XContentHelper.convertToMap(source, false, xContentType).v2()); return this; } @@ -485,7 +550,13 @@ public class CreateIndexRequest extends AcknowledgedRequest readTimeout(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - mappings.put(in.readString(), in.readString()); + final String type = in.readString(); + String source = in.readString(); + if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to 5.3.0 after backport + // we do not know the content type that comes from earlier versions so we autodetect and convert + source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); + } + mappings.put(type, source); } int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index eaae4d53b73..237c88244b4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import java.util.Map; @@ -76,13 +77,23 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder { @Override public String toString() { - return "flush {" + super.toString() + "}"; + return "flush {" + shardId + "}"; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 1ec7186393f..026946334ac 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -65,17 +65,7 @@ public class TransportShardFlushAction extends TransportReplicationAction { - public static enum Feature { + public enum Feature { ALIASES((byte) 0, "_aliases", "_alias"), MAPPINGS((byte) 1, "_mappings", "_mapping"), SETTINGS((byte) 2, "_settings"); @@ -52,7 +52,7 @@ public class GetIndexRequest extends ClusterInfoRequest { private final String preferredName; private final byte id; - private Feature(byte id, String... validNames) { + Feature(byte id, String... 
validNames) { assert validNames != null && validNames.length > 0; this.id = id; this.validNames = Arrays.asList(validNames); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 3a29237faeb..6c2e4627523 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -104,7 +104,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + mappingEntryBuilder.put(in.readString(), new MappingMetaData(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -116,7 +116,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); List aliasEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { - aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in)); + aliasEntryBuilder.add(new AliasMetaData(in)); } aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index e0cedcf841e..3f4ddaf08db 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.Mapper; import java.io.IOException; @@ -108,20 +109,25 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte /** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. 
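// Assumed sketch of reading the response above: sourceAsMap() now converts
// the stored source explicitly as JSON (field mapping sources are held as
// JSON internally), and the returned map is keyed by the field's full name.
// The index/type/field names are illustrative.
import java.util.Map;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;

class FieldMappingReadExample {
    static Object titleMapping(GetFieldMappingsResponse response) {
        FieldMappingMetaData meta = response.fieldMappings("my_index", "my_type", "title");
        Map<String, Object> source = meta.sourceAsMap(); // single key: the field's Mapper#name
        return source.get("title");
    }
}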
*/ public Map sourceAsMap() { - return XContentHelper.convertToMap(source, true).v2(); + return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); } public boolean isNull() { return NULL.fullName().equals(fullName) && NULL.source.length() == source.length(); } + //pkg-private for testing + BytesReference getSource() { + return source; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("full_name", fullName); if (params.paramAsBoolean("pretty", false)) { builder.field("mapping", sourceAsMap()); } else { - builder.rawField("mapping", source); + builder.rawField("mapping", source, XContentType.JSON); } return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index e092f1f148d..12975c765d0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -57,7 +57,7 @@ public class GetMappingsResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + typeMapBuilder.put(in.readString(), new MappingMetaData(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 864c6703c48..92c23bb8568 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -50,12 +50,10 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.Map; import java.util.stream.Collectors; import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.util.CollectionUtils.newLinkedList; /** * Transport action used to retrieve the mappings related to fields that belong to a specific index @@ -174,24 +172,12 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults()); } } else if (Regex.isSimpleMatchPattern(field)) { - // go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name. - // also make sure we only store each mapper once. 
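// The removed multi-pass loop above is replaced by a single pass using
// Regex.simpleMatch. A small assumed sketch of what that matcher does with
// wildcard field patterns ('*' is the only special character):
import org.elasticsearch.common.regex.Regex;

class SimpleMatchExample {
    static void demo() {
        boolean matches = Regex.simpleMatch("user.*", "user.name"); // true: '*' matches any suffix
        boolean misses  = Regex.simpleMatch("user.*", "username");  // false: the '.' is literal
        assert matches && !misses;
    }
}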
- Collection remainingFieldMappers = newLinkedList(allFieldMappers); - for (Iterator it = remainingFieldMappers.iterator(); it.hasNext(); ) { - final FieldMapper fieldMapper = it.next(); + for (FieldMapper fieldMapper : allFieldMappers) { if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) { - addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults()); - it.remove(); + addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, + request.includeDefaults()); } } - for (Iterator it = remainingFieldMappers.iterator(); it.hasNext(); ) { - final FieldMapper fieldMapper = it.next(); - if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) { - addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults()); - it.remove(); - } - } - } else { // not a pattern FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field); @@ -220,4 +206,4 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 152bc516549..400701f91ca 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -22,19 +22,24 @@ package org.elasticsearch.action.admin.indices.mapping.put; import com.carrotsearch.hppc.ObjectHashSet; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -245,7 +250,7 @@ public class PutMappingRequest extends AcknowledgedRequest im */ public PutMappingRequest source(XContentBuilder mappingBuilder) { try { - return source(mappingBuilder.string()); + return source(mappingBuilder.string(), mappingBuilder.contentType()); } catch (IOException e) { throw new IllegalArgumentException("Failed to build json for mapping request", e); } @@ -259,7 +264,7 @@ public class PutMappingRequest extends AcknowledgedRequest im try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(mappingSource); - return source(builder.string()); + return source(builder.string(), XContentType.JSON); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + mappingSource + "]", e); } @@ -267,10 +272,31 @@ public class PutMappingRequest extends AcknowledgedRequest im /** * The mapping source definition. 
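// Assumed usage sketch for the source(...) overloads above: pass the mapping
// together with an explicit XContentType; the String-only overload is now
// deprecated. Index, type, and mapping body are illustrative.
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.xcontent.XContentType;

class PutMappingExample {
    static PutMappingRequest build() {
        return new PutMappingRequest("my_index")
            .type("my_type")
            .source("{\"properties\": {\"title\": {\"type\": \"keyword\"}}}", XContentType.JSON);
    }
}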
+ * @deprecated use {@link #source(String, XContentType)} */ + @Deprecated public PutMappingRequest source(String mappingSource) { - this.source = mappingSource; - return this; + return source(mappingSource, XContentFactory.xContentType(mappingSource)); + } + + /** + * The mapping source definition. + */ + public PutMappingRequest source(String mappingSource, XContentType xContentType) { + return source(new BytesArray(mappingSource), xContentType); + } + + /** + * The mapping source definition. + */ + public PutMappingRequest source(BytesReference mappingSource, XContentType xContentType) { + Objects.requireNonNull(xContentType); + try { + this.source = XContentHelper.convertToJson(mappingSource, false, false, xContentType); + return this; + } catch (IOException e) { + throw new UncheckedIOException("failed to convert source to json", e); + } } /** True if all fields that span multiple types should be updated, false otherwise */ @@ -291,6 +317,10 @@ public class PutMappingRequest extends AcknowledgedRequest im indicesOptions = IndicesOptions.readIndicesOptions(in); type = in.readOptionalString(); source = in.readString(); + if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to V_5_3 once backported + // we do not know the format from earlier versions so convert if necessary + source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); + } updateAllTypes = in.readBoolean(); readTimeout(in); concreteIndex = in.readOptionalWriteable(Index::new); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index c21c40cf041..012a593ebc4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import java.util.Map; @@ -82,12 +83,22 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder implements NamedWriteable { - public static ObjectParser, ParseFieldMatcherSupplier> PARSER = - new ObjectParser<>("conditions", null); + public static ObjectParser, Void> PARSER = new ObjectParser<>("conditions", null); static { PARSER.declareString((conditions, s) -> conditions.add(new MaxAgeCondition(TimeValue.parseTimeValue(s, MaxAgeCondition.NAME))), @@ -49,7 +47,7 @@ public abstract class Condition implements NamedWriteable { this.name = name; } - public abstract Result evaluate(final Stats stats); + public abstract Result evaluate(Stats stats); @Override public final String toString() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index ddd58705bea..4804bc577fc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -25,7 +25,6 @@ import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -44,23 +43,19 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest { - public static final ObjectParser PARSER = - new ObjectParser<>("conditions", null); + public static final ObjectParser PARSER = new ObjectParser<>("conditions", null); static { - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> - Condition.PARSER.parse(parser, request.conditions, parseFieldMatcherSupplier), + PARSER.declareField((parser, request, context) -> Condition.PARSER.parse(parser, request.conditions, null), new ParseField("conditions"), ObjectParser.ValueType.OBJECT); - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> - request.createIndexRequest.settings(parser.map()), + PARSER.declareField((parser, request, context) -> request.createIndexRequest.settings(parser.map()), new ParseField("settings"), ObjectParser.ValueType.OBJECT); - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> { + PARSER.declareField((parser, request, context) -> { for (Map.Entry mappingsEntry : parser.map().entrySet()) { request.createIndexRequest.mapping(mappingsEntry.getKey(), (Map) mappingsEntry.getValue()); } }, new ParseField("mappings"), ObjectParser.ValueType.OBJECT); - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> - request.createIndexRequest.aliases(parser.map()), + PARSER.declareField((parser, request, context) -> request.createIndexRequest.aliases(parser.map()), new ParseField("aliases"), ObjectParser.ValueType.OBJECT); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index b495e3c6a0f..8c1be3501a8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -32,7 +32,7 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; -public final class RolloverResponse extends ActionResponse implements ToXContent { +public final class RolloverResponse extends ActionResponse implements ToXContentObject { private static final String NEW_INDEX = "new_index"; private static final String OLD_INDEX = "old_index"; @@ -157,6 +157,7 @@ public final class RolloverResponse extends ActionResponse implements ToXContent @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.field(OLD_INDEX, oldIndex); 
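// Note (descriptive, not part of the patch): because RolloverResponse is now a
// ToXContentObject, toXContent() emits its own enclosing startObject()/endObject()
// pair, so callers no longer have to open the surrounding object before delegating.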
builder.field(NEW_INDEX, newIndex); builder.field(ROLLED_OVER, rolledOver); @@ -168,6 +169,7 @@ public final class RolloverResponse extends ActionResponse implements ToXContent builder.field(entry.getKey(), entry.getValue()); } builder.endObject(); + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 67099b4d100..8a8d1063f3e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -62,7 +62,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeActiontrue iff the settings update should only add but not update settings. If the setting already exists * it should not be overwritten by this update. The default is false @@ -146,14 +156,14 @@ public class UpdateSettingsRequest extends AcknowledgedRequest source) { request.settings(source); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index f1df6d53e18..3657e327265 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -207,7 +207,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon builder.field(Fields.ALLOCATED, allocationStatus.value()); if (storeException != null) { builder.startObject(Fields.STORE_EXCEPTION); - ElasticsearchException.toXContent(builder, params, storeException); + ElasticsearchException.generateThrowableXContent(builder, params, storeException); builder.endObject(); } return builder; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index e13578d66de..bce20df34e3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -226,7 +226,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc private final List responses; private final List failures; - public Response(ShardId shardId, List responses, List failures) { + Response(ShardId shardId, List responses, List failures) { this.shardId = shardId; this.responses = responses; this.failures = failures; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java index 40a11402501..faa0a63c54d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; -import 
org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -40,14 +39,11 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public class ShrinkRequest extends AcknowledgedRequest implements IndicesRequest { - public static final ObjectParser PARSER = - new ObjectParser<>("shrink_request", null); + public static final ObjectParser PARSER = new ObjectParser<>("shrink_request", null); static { - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> - request.getShrinkIndexRequest().settings(parser.map()), + PARSER.declareField((parser, request, context) -> request.getShrinkIndexRequest().settings(parser.map()), new ParseField("settings"), ObjectParser.ValueType.OBJECT); - PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> - request.getShrinkIndexRequest().aliases(parser.map()), + PARSER.declareField((parser, request, context) -> request.getShrinkIndexRequest().aliases(parser.map()), new ParseField("aliases"), ObjectParser.ValueType.OBJECT); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java index 6d27b03db63..8c482eac10c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java @@ -131,6 +131,9 @@ public class TransportShrinkAction extends TransportMasterNodeAction indexTemplates; @@ -52,7 +53,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont int size = in.readVInt(); indexTemplates = new ArrayList<>(size); for (int i = 0 ; i < size ; i++) { - indexTemplates.add(0, IndexTemplateMetaData.Builder.readFrom(in)); + indexTemplates.add(0, IndexTemplateMetaData.readFrom(in)); } } @@ -68,10 +69,11 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { params = new ToXContent.DelegatingMapParams(singletonMap("reduce_mappings", "true"), params); - + builder.startObject(); for (IndexTemplateMetaData indexTemplateMetaData : getIndexTemplates()) { IndexTemplateMetaData.Builder.toXContent(indexTemplateMetaData, builder, params); } + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 77c3a1d4c29..b0c13540dfa 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -44,11 +45,13 @@ import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -178,21 +181,31 @@ public class PutIndexTemplateRequest extends MasterNodeRequest source) { try { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.map(source); - settings(builder.string()); + settings(builder.string(), XContentType.JSON); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } @@ -208,10 +221,23 @@ public class PutIndexTemplateRequest extends MasterNodeRequest source) { request.settings(source); @@ -120,12 +131,26 @@ public class PutIndexTemplateRequestBuilder * * @param type The mapping type * @param source The mapping source + * @deprecated use {@link #addMapping(String, String, XContentType)} */ + @Deprecated public PutIndexTemplateRequestBuilder addMapping(String type, String source) { request.mapping(type, source); return this; } + /** + * Adds mapping that will be added when the index template gets created. + * + * @param type The mapping type + * @param source The mapping source + * @param xContentType The type/format of the source + */ + public PutIndexTemplateRequestBuilder addMapping(String type, String source, XContentType xContentType) { + request.mapping(type, source, xContentType); + return this; + } + /** * A specialized simplified mapping source method, takes the form of simple properties definition: * ("field1", "type=string,store=true"). @@ -226,7 +251,9 @@ public class PutIndexTemplateRequestBuilder /** * The template source definition. + * @deprecated use {@link #setSource(BytesReference, XContentType)} */ + @Deprecated public PutIndexTemplateRequestBuilder setSource(String templateSource) { request.source(templateSource); return this; @@ -235,6 +262,16 @@ public class PutIndexTemplateRequestBuilder /** * The template source definition. */ + public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource, XContentType xContentType) { + request.source(templateSource, xContentType); + return this; + } + + /** + * The template source definition. + * @deprecated use {@link #setSource(BytesReference, XContentType)} + */ + @Deprecated public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource) { request.source(templateSource); return this; @@ -242,7 +279,9 @@ public class PutIndexTemplateRequestBuilder /** * The template source definition. + * @deprecated use {@link #setSource(byte[], XContentType)} */ + @Deprecated public PutIndexTemplateRequestBuilder setSource(byte[] templateSource) { request.source(templateSource); return this; @@ -251,8 +290,26 @@ public class PutIndexTemplateRequestBuilder /** * The template source definition. */ + public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, XContentType xContentType) { + request.source(templateSource, xContentType); + return this; + } + + /** + * The template source definition. + * @deprecated use {@link #setSource(byte[], int, int, XContentType)} + */ + @Deprecated public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, int offset, int length) { request.source(templateSource, offset, length); return this; } + + /** + * The template source definition. 
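The theme running through this file's changes: every setter that used to sniff the payload format now has an overload taking an explicit XContentType, and the sniffing variant is deprecated. A hedged usage sketch, where the template name, mapping type, and mapping body are made up for illustration:

import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.common.xcontent.XContentType;

class TypedSourceSketch {
    PutIndexTemplateRequest typedMapping() {
        PutIndexTemplateRequest request = new PutIndexTemplateRequest("template_1");
        String mapping = "{\"properties\":{\"field1\":{\"type\":\"text\"}}}";
        // Deprecated: request.mapping("type1", mapping) forces content-type auto-detection.
        // Preferred after this change: the caller states the format explicitly.
        request.mapping("type1", mapping, XContentType.JSON);
        return request;
    }
}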
+ */ + public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, int offset, int length, XContentType xContentType) { + request.source(templateSource, offset, length, xContentType); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java index bc8f8c347ab..81084e22377 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -171,7 +171,7 @@ public abstract class BackoffPolicy implements Iterable { private final int numberOfElements; - public ConstantBackoff(TimeValue delay, int numberOfElements) { + ConstantBackoff(TimeValue delay, int numberOfElements) { assert numberOfElements >= 0; this.delay = delay; this.numberOfElements = numberOfElements; @@ -188,7 +188,7 @@ public abstract class BackoffPolicy implements Iterable { private final int numberOfElements; private int curr; - public ConstantBackoffIterator(TimeValue delay, int numberOfElements) { + ConstantBackoffIterator(TimeValue delay, int numberOfElements) { this.delay = delay; this.numberOfElements = numberOfElements; } @@ -212,7 +212,7 @@ public abstract class BackoffPolicy implements Iterable { private final BackoffPolicy delegate; private final Runnable onBackoff; - public WrappedBackoffPolicy(BackoffPolicy delegate, Runnable onBackoff) { + WrappedBackoffPolicy(BackoffPolicy delegate, Runnable onBackoff) { this.delegate = delegate; this.onBackoff = onBackoff; } @@ -227,7 +227,7 @@ public abstract class BackoffPolicy implements Iterable { private final Iterator delegate; private final Runnable onBackoff; - public WrappedBackoffIterator(Iterator delegate, Runnable onBackoff) { + WrappedBackoffIterator(Iterator delegate, Runnable onBackoff) { this.delegate = delegate; this.onBackoff = onBackoff; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index e1a6e48e9e0..6b8d34098aa 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -22,8 +22,8 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; @@ -32,7 +32,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.StatusToXContent; +import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; @@ -43,7 +43,7 @@ import java.io.IOException; * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id * of the relevant action, and if it has failed or not (with the failure message in case it failed).
*/ -public class BulkItemResponse implements Streamable, StatusToXContent { +public class BulkItemResponse implements Streamable, StatusToXContentObject { @Override public RestStatus status() { @@ -52,9 +52,10 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startObject(opType.getLowercase()); if (failure == null) { - response.toXContent(builder, params); + response.innerToXContent(builder, params); builder.field(Fields.STATUS, response.status().getStatus()); } else { builder.field(Fields._INDEX, failure.getIndex()); @@ -62,10 +63,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent { builder.field(Fields._ID, failure.getId()); builder.field(Fields.STATUS, failure.getStatus().getStatus()); builder.startObject(Fields.ERROR); - ElasticsearchException.toXContent(builder, params, failure.getCause()); + ElasticsearchException.generateThrowableXContent(builder, params, failure.getCause()); builder.endObject(); } builder.endObject(); + builder.endObject(); return builder; } @@ -90,10 +92,10 @@ public class BulkItemResponse implements Streamable, StatusToXContent { private final String index; private final String type; private final String id; - private final Throwable cause; + private final Exception cause; private final RestStatus status; - public Failure(String index, String type, String id, Throwable cause) { + public Failure(String index, String type, String id, Exception cause) { this.index = index; this.type = type; this.id = id; @@ -159,7 +161,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { /** * The actual cause of the failure. */ - public Throwable getCause() { + public Exception getCause() { return cause; } @@ -171,7 +173,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { builder.field(ID_FIELD, id); } builder.startObject(CAUSE_FIELD); - ElasticsearchException.toXContent(builder, params, cause); + ElasticsearchException.generateThrowableXContent(builder, params, cause); builder.endObject(); builder.field(STATUS_FIELD, status.getStatus()); return builder; @@ -179,7 +181,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public String toString() { - return Strings.toString(this, true); + return Strings.toString(this); } } @@ -302,7 +304,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) { opType = OpType.fromId(in.readByte()); } else { opType = OpType.fromString(in.readString()); @@ -315,6 +317,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { } else if (type == 1) { response = new DeleteResponse(); response.readFrom(in); + } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses' response = new UpdateResponse(); response.readFrom(in); @@ -328,7 +331,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) { out.writeByte(opType.getId()); } else { 
out.writeString(opType.getLowercase()); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 5c3e8d9295e..371659586f9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -39,9 +39,11 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -243,7 +245,9 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques /** * Adds a framed data in binary format + * @deprecated use {@link #add(byte[], int, int, XContentType)} */ + @Deprecated public BulkRequest add(byte[] data, int from, int length) throws IOException { return add(data, from, length, null, null); } @@ -251,6 +255,15 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques /** * Adds a framed data in binary format */ + public BulkRequest add(byte[] data, int from, int length, XContentType xContentType) throws IOException { + return add(data, from, length, null, null, xContentType); + } + + /** + * Adds a framed data in binary format + * @deprecated use {@link #add(byte[], int, int, String, String, XContentType)} + */ + @Deprecated public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { return add(new BytesArray(data, from, length), defaultIndex, defaultType); } @@ -258,6 +271,17 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques /** * Adds a framed data in binary format */ + public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType, + XContentType xContentType) throws IOException { + return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType); + } + + /** + * Adds a framed data in binary format + * + * @deprecated use {@link #add(BytesReference, String, String, XContentType)} + */ + @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { return add(data, defaultIndex, defaultType, null, null, null, null, null, true); } @@ -265,12 +289,40 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques /** * Adds a framed data in binary format */ + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + XContentType xContentType) throws IOException { + return add(data, defaultIndex, defaultType, null, null, null, null, null, true, xContentType); + } + + /** + * Adds a framed data in binary format + * + * @deprecated use {@link #add(BytesReference, String, String, boolean, XContentType)} + */ + @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws IOException { return add(data, defaultIndex, 
defaultType, null, null, null, null, null, allowExplicitIndex); } + /** + * Adds a framed data in binary format + */ + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex, + XContentType xContentType) throws IOException { + return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex, xContentType); + } + + @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws IOException { - XContent xContent = XContentFactory.xContent(data); + XContentType xContentType = XContentFactory.xContentType(data); + return add(data, defaultIndex, defaultType, defaultRouting, defaultFields, defaultFetchSourceContext, defaultPipeline, payload, + allowExplicitIndex, xContentType); + } + + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String + defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String + defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException { + XContent xContent = xContentType.xContent(); int line = 0; int from = 0; int length = data.length(); @@ -283,7 +335,8 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques line++; // now parse the action - try (XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from))) { + // EMPTY is safe here because we never call namedObject + try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) { // move pointers from = nextMarker + 1; @@ -347,7 +400,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques } else if ("fields".equals(currentFieldName)) { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected"); } else if ("_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); } @@ -360,7 +413,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } @@ -385,22 +438,23 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques if ("index".equals(action)) { if (opType == null) { internalAdd(new IndexRequest(index, type, 
id).routing(routing).parent(parent).version(version).versionType(versionType) - .setPipeline(pipeline).source(data.slice(from, nextMarker - from)), payload); + .setPipeline(pipeline).source(data.slice(from, nextMarker - from), xContentType), payload); } else { internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) .create("create".equals(opType)).setPipeline(pipeline) - .source(data.slice(from, nextMarker - from)), payload); + .source(data.slice(from, nextMarker - from), xContentType), payload); } } else if ("create".equals(action)) { internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) .create(true).setPipeline(pipeline) - .source(data.slice(from, nextMarker - from)), payload); + .source(data.slice(from, nextMarker - from), xContentType), payload); } else if ("update".equals(action)) { UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict) .version(version).versionType(versionType) .routing(routing) .parent(parent); - try (XContentParser sliceParser = xContent.createParser(data.slice(from, nextMarker - from))) { + // EMPTY is safe here because we never call namedObject + try (XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) { updateRequest.fromXContent(sliceParser); } if (fetchSourceContext != null) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index c48a8f507b8..8f634fa28a4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; /** * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes @@ -97,7 +98,9 @@ public class BulkRequestBuilder extends ActionRequestBuilder scheduledRequestFuture; - public AbstractRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + AbstractRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { this.retryOnThrowable = retryOnThrowable; this.backoff = backoffPolicy.iterator(); this.client = client; @@ -213,7 +213,7 @@ public class Retry { } static class AsyncRetryHandler extends AbstractRetryHandler { - public AsyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + AsyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { super(retryOnThrowable, backoffPolicy, client, listener); } } @@ -226,7 +226,7 @@ public class Retry { return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture); } - public SyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture actionFuture) { + SyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture actionFuture) { super(retryOnThrowable, backoffPolicy, client, actionFuture); this.actionFuture 
= actionFuture; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 27a579db276..e7c2018ad31 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -31,8 +31,6 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.IngestActionForwarder; import org.elasticsearch.action.support.ActionFilters; @@ -41,6 +39,8 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -49,11 +49,15 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -153,11 +157,7 @@ public class TransportBulkAction extends HandledTransportAction { inner.addSuppressed(e); listener.onFailure(inner); - } + }), responses); } } }); @@ -209,134 +207,200 @@ public class TransportBulkAction extends HandledTransportAction listener) { - final long startTimeNanos = relativeTime(); - executeBulk(null, bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size())); - } - private long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); } - void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener listener, final AtomicArray responses ) { - final ClusterState clusterState = clusterService.state(); - // TODO use timeout to wait here if its blocked... 
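The TODO just above is exactly what the new BulkOperation implements: instead of globalBlockedRaiseException() failing a blocked bulk outright, retryable blocks now wait on a ClusterStateObserver until the block lifts or the request times out. A distilled sketch of that pattern, where retryAction is a hypothetical stand-in for BulkOperation.run():

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.common.unit.TimeValue;

class BlockRetrySketch {
    // Distilled from the new BulkOperation in this change; not the verbatim method.
    static boolean handleBlock(ClusterState state, ClusterStateObserver observer,
                               ActionListener<BulkResponse> listener, Runnable retryAction) {
        ClusterBlockException block = state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
        if (block == null) {
            return false;                       // not blocked: carry on with the bulk
        }
        if (block.retryable() == false || observer.isTimedOut()) {
            listener.onFailure(block);          // non-retryable, or the timeout is already spent
        } else {
            observer.waitForNextChange(new ClusterStateObserver.Listener() {
                @Override
                public void onNewClusterState(ClusterState newState) {
                    retryAction.run();          // the block may have lifted; try again
                }

                @Override
                public void onClusterServiceClose() {
                    listener.onFailure(block);  // the real code fails with NodeClosedException here
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    retryAction.run();          // one last attempt, mirroring the new retry()
                }
            });
        }
        return true;
    }
}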
- clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE); + /** + * retries on retryable cluster blocks, resolves item requests, + * constructs shard bulk requests and delegates execution to shard bulk action + * */ + private final class BulkOperation extends AbstractRunnable { + private final Task task; + private final BulkRequest bulkRequest; + private final ActionListener listener; + private final AtomicArray responses; + private final long startTimeNanos; + private final ClusterStateObserver observer; - final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); - MetaData metaData = clusterState.metaData(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); - //the request can only be null because we set it to null in the previous step, so it gets ignored - if (docWriteRequest == null) { - continue; + BulkOperation(Task task, BulkRequest bulkRequest, ActionListener listener, + AtomicArray responses, long startTimeNanos) { + this.task = task; + this.bulkRequest = bulkRequest; + this.listener = listener; + this.responses = responses; + this.startTimeNanos = startTimeNanos; + this.observer = new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + protected void doRun() throws Exception { + final ClusterState clusterState = observer.setAndGetObservedState(); + if (handleBlockExceptions(clusterState)) { + return; } - if (addFailureIfIndexIsUnavailable(docWriteRequest, bulkRequest, responses, i, concreteIndices, metaData)) { - continue; - } - Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest); - try { - switch (docWriteRequest.opType()) { - case CREATE: - case INDEX: - IndexRequest indexRequest = (IndexRequest) docWriteRequest; - MappingMetaData mappingMd = null; - final IndexMetaData indexMetaData = metaData.index(concreteIndex); - if (indexMetaData != null) { - mappingMd = indexMetaData.mappingOrDefault(indexRequest.type()); - } - indexRequest.resolveRouting(metaData); - indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName()); - break; - case UPDATE: - TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest); - break; - case DELETE: - TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest) docWriteRequest); - break; - default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); + final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); + MetaData metaData = clusterState.metaData(); + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); + //the request can only be null because we set it to null in the previous step, so it gets ignored + if (docWriteRequest == null) { + continue; + } + if (addFailureIfIndexIsUnavailable(docWriteRequest, bulkRequest, responses, i, concreteIndices, metaData)) { + continue; + } + Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest); + try { + switch (docWriteRequest.opType()) { + case CREATE: + case INDEX: + IndexRequest indexRequest = (IndexRequest) docWriteRequest; + MappingMetaData mappingMd = null; + final IndexMetaData indexMetaData = metaData.index(concreteIndex); 
+ if (indexMetaData != null) { + mappingMd = indexMetaData.mappingOrDefault(indexRequest.type()); + } + indexRequest.resolveRouting(metaData); + indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName()); + break; + case UPDATE: + TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest); + break; + case DELETE: + docWriteRequest.routing(metaData.resolveIndexRouting(docWriteRequest.parent(), docWriteRequest.routing(), docWriteRequest.index())); + // check if routing is required, if so, throw error if routing wasn't specified + if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), docWriteRequest.type())) { + throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id()); + } + break; + default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); + } + } catch (ElasticsearchParseException | RoutingMissingException e) { + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e); + BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); + responses.set(i, bulkItemResponse); + // make sure the request gets never processed again + bulkRequest.requests.set(i, null); } - } catch (ElasticsearchParseException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e); - BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); } - } - // first, go over all the requests and create a ShardId -> Operations mapping - Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest request = bulkRequest.requests.get(i); - if (request == null) { - continue; + // first, go over all the requests and create a ShardId -> Operations mapping + Map> requestsByShard = new HashMap<>(); + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest request = bulkRequest.requests.get(i); + if (request == null) { + continue; + } + String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); + ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, request)); } - String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); - List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); - shardRequests.add(new BulkItemRequest(i, request)); - } - if (requestsByShard.isEmpty()) { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); - return; - } - - final AtomicInteger counter = new AtomicInteger(requestsByShard.size()); - String nodeId = clusterService.localNode().getId(); - for (Map.Entry> entry : requestsByShard.entrySet()) { - final ShardId shardId = entry.getKey(); 
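Both the old and the new code group per-item requests into one bucket per shard with computeIfAbsent before turning each bucket into a BulkShardRequest. A stand-alone illustration of that grouping idiom, with integers and strings standing in for ShardId and BulkItemRequest:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ShardGroupingSketch {
    public static void main(String[] args) {
        Map<Integer, List<String>> requestsByShard = new HashMap<>();
        for (String doc : Arrays.asList("doc-a", "doc-b", "doc-c", "doc-d")) {
            // stands in for operationRouting().indexShards(...).shardId()
            int shard = Math.floorMod(doc.hashCode(), 3);
            requestsByShard.computeIfAbsent(shard, s -> new ArrayList<>()).add(doc);
        }
        System.out.println(requestsByShard); // each bucket becomes one BulkShardRequest
    }
}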
- final List requests = entry.getValue(); - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(), - requests.toArray(new BulkItemRequest[requests.size()])); - bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards()); - bulkShardRequest.timeout(bulkRequest.timeout()); - if (task != null) { - bulkShardRequest.setParentTask(nodeId, task.getId()); + if (requestsByShard.isEmpty()) { + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + return; } - shardBulkAction.execute(bulkShardRequest, new ActionListener() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + + final AtomicInteger counter = new AtomicInteger(requestsByShard.size()); + String nodeId = clusterService.localNode().getId(); + for (Map.Entry> entry : requestsByShard.entrySet()) { + final ShardId shardId = entry.getKey(); + final List requests = entry.getValue(); + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(), + requests.toArray(new BulkItemRequest[requests.size()])); + bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards()); + bulkShardRequest.timeout(bulkRequest.timeout()); + if (task != null) { + bulkShardRequest.setParentTask(nodeId, task.getId()); + } + shardBulkAction.execute(bulkShardRequest, new ActionListener() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } + if (counter.decrementAndGet() == 0) { + finishHim(); } - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } - if (counter.decrementAndGet() == 0) { - finishHim(); + + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + DocWriteRequest docWriteRequest = request.request(); + responses.set(request.id(), new BulkItemResponse(request.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.type(), docWriteRequest.id(), e))); + } + if (counter.decrementAndGet() == 0) { + finishHim(); + } } + + private void finishHim() { + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + } + }); + } + } + + private boolean handleBlockExceptions(ClusterState state) { + ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); + if (blockException != null) { + if (blockException.retryable()) { + logger.trace("cluster is blocked, scheduling a retry", blockException); + retry(blockException); + } else { + onFailure(blockException); + } + return true; + } + return false; + } + + void retry(Exception failure) { + assert failure != null; + if (observer.isTimedOut()) { + // we running as a last attempt after a timeout 
has happened. don't retry + onFailure(failure); + return; + } + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + run(); } @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - DocWriteRequest docWriteRequest = request.request(); - responses.set(request.id(), new BulkItemResponse(request.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.type(), docWriteRequest.id(), e))); - } - if (counter.decrementAndGet() == 0) { - finishHim(); - } + public void onClusterServiceClose() { + onFailure(new NodeClosedException(clusterService.localNode())); } - private void finishHim() { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + @Override + public void onTimeout(TimeValue timeout) { + // Try one more time... + run(); } }); } } + void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener listener, final AtomicArray responses ) { + new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos).run(); + } + private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, final ConcreteIndices concreteIndices, final MetaData metaData) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index cef89e1ce78..f5faa184036 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -28,6 +29,8 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationOperation; +import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; @@ -47,28 +50,22 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbersService; import 
org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Map; -import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnPrimary; -import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnReplica; -import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary; -import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnReplica; -import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException; -import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException; - /** Performs shard-level bulk (index, delete or update) operations */ public class TransportShardBulkAction extends TransportWriteAction { @@ -106,7 +103,8 @@ public class TransportShardBulkAction extends TransportWriteAction shardOperationOnPrimary( + BulkShardRequest request, IndexShard primary) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -122,7 +120,7 @@ public class TransportShardBulkAction extends TransportWriteAction(request, response, location, null, primary, logger); } /** Executes bulk item requests and handles request execution exceptions */ @@ -151,7 +149,7 @@ public class TransportShardBulkAction extends TransportWriteAction 0)) { Tuple> sourceAndContent = - XContentHelper.convertToMap(indexSourceAsBytes, true); + XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType()); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); } @@ -349,9 +351,9 @@ public class TransportShardBulkAction extends TransportWriteAction shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; @@ -388,11 +390,9 @@ public class TransportShardBulkAction extends TransportWriteAction(request, location, null, replica, logger); } private Translog.Location locationToSync(Translog.Location current, Translog.Location next) { @@ -421,4 +421,79 @@ public class TransportShardBulkAction extends TransportWriteAction, + Response extends ReplicationResponse & WriteResponse + > extends TransportWriteAction { + + private final TransportBulkAction bulkAction; + private final TransportShardBulkAction shardBulkAction; + + + protected TransportSingleItemBulkWriteAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + Supplier replicaRequest, String executor, + TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { + super(settings, actionName, transportService, clusterService, 
indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, request, replicaRequest, executor); + this.bulkAction = bulkAction; + this.shardBulkAction = shardBulkAction; + } + + + @Override + protected void doExecute(Task task, final Request request, final ActionListener listener) { + bulkAction.execute(task, toSingleItemBulkRequest(request), wrapBulkResponse(listener)); + } + + @Override + protected WritePrimaryResult shardOperationOnPrimary( + Request request, final IndexShard primary) throws Exception { + BulkItemRequest[] itemRequests = new BulkItemRequest[1]; + WriteRequest.RefreshPolicy refreshPolicy = request.getRefreshPolicy(); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); + itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) request)); + BulkShardRequest bulkShardRequest = new BulkShardRequest(request.shardId(), refreshPolicy, itemRequests); + WritePrimaryResult bulkResult = + shardBulkAction.shardOperationOnPrimary(bulkShardRequest, primary); + assert bulkResult.finalResponseIfSuccessful.getResponses().length == 1 : "expected only one bulk shard response"; + BulkItemResponse itemResponse = bulkResult.finalResponseIfSuccessful.getResponses()[0]; + final Response response; + final Exception failure; + if (itemResponse.isFailed()) { + failure = itemResponse.getFailure().getCause(); + response = null; + } else { + response = (Response) itemResponse.getResponse(); + failure = null; + } + return new WritePrimaryResult<>(request, response, bulkResult.location, failure, primary, logger); + } + + @Override + protected WriteReplicaResult shardOperationOnReplica( + Request replicaRequest, IndexShard replica) throws Exception { + BulkItemRequest[] itemRequests = new BulkItemRequest[1]; + WriteRequest.RefreshPolicy refreshPolicy = replicaRequest.getRefreshPolicy(); + itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) replicaRequest)); + BulkShardRequest bulkShardRequest = new BulkShardRequest(replicaRequest.shardId(), refreshPolicy, itemRequests); + WriteReplicaResult result = shardBulkAction.shardOperationOnReplica(bulkShardRequest, replica); + // a replica operation can never throw a document-level failure, + // as the same document has been already indexed successfully in the primary + return new WriteReplicaResult<>(replicaRequest, result.location, null, replica, logger); + } + + + public static + ActionListener wrapBulkResponse(ActionListener listener) { + return ActionListener.wrap(bulkItemResponses -> { + assert bulkItemResponses.getItems().length == 1 : "expected only one item in bulk request"; + BulkItemResponse bulkItemResponse = bulkItemResponses.getItems()[0]; + if (bulkItemResponse.isFailed() == false) { + final DocWriteResponse response = bulkItemResponse.getResponse(); + listener.onResponse((Response) response); + } else { + listener.onFailure(bulkItemResponse.getFailure().getCause()); + } + }, listener::onFailure); + } + + public static BulkRequest toSingleItemBulkRequest(ReplicatedWriteRequest request) { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(((DocWriteRequest) request)); + bulkRequest.setRefreshPolicy(request.getRefreshPolicy()); + bulkRequest.timeout(request.timeout()); + bulkRequest.waitForActiveShards(request.waitForActiveShards()); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); + return bulkRequest; + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java 
b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java similarity index 51% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java index 34ad6e2ee25..834321f1798 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java @@ -17,16 +17,29 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.Retry; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; @@ -40,27 +53,60 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; +import static java.lang.Math.max; +import static java.lang.Math.min; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff; +import static org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES; +import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.rest.RestStatus.CONFLICT; +import static org.elasticsearch.search.sort.SortBuilders.fieldSort; /** - * Abstract base for scrolling across a search and executing bulk indexes on all - * results. 
+ * Abstract base for scrolling across a search and executing bulk actions on all results. All package private methods are package private so + their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block. */ -public abstract class AbstractAsyncBulkIndexByScrollAction> - extends AbstractAsyncBulkByScrollAction { - +public abstract class AbstractAsyncBulkByScrollAction> { + protected final Logger logger; + protected final WorkingBulkByScrollTask task; + protected final ThreadPool threadPool; protected final ScriptService scriptService; protected final ClusterState clusterState; + /** + * The request for this action. Named mainRequest because we create lots of request variables all representing child + * requests of this mainRequest. + */ + protected final Request mainRequest; + + private final AtomicLong startTime = new AtomicLong(-1); + private final Set destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + private final ParentTaskAssigningClient client; + private final ActionListener listener; + private final Retry bulkRetry; + private final ScrollableHitSource scrollSource; + /** * This BiFunction is used to apply various changes depending on the Reindex action and the search hit, * from copying search hit metadata (parent, routing, etc) to potentially transforming the @@ -68,54 +114,47 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; - public AbstractAsyncBulkIndexByScrollAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, - ActionListener listener, - ScriptService scriptService, ClusterState clusterState) { - super(task, logger, client, threadPool, mainRequest, listener); + public AbstractAsyncBulkByScrollAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, Request mainRequest, ScriptService scriptService, ClusterState clusterState, + ActionListener listener) { + this.task = task; + this.logger = logger; + this.client = client; + this.threadPool = threadPool; this.scriptService = scriptService; this.clusterState = clusterState; - this.scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null"); + this.mainRequest = mainRequest; + this.listener = listener; + BackoffPolicy backoffPolicy = buildBackoffPolicy(); + bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry)); + scrollSource = buildScrollableResultSource(backoffPolicy); + scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null"); + /* + * Default to sorting by doc. We can't do this in the request itself because it is normal to *add* to the sorts rather than replace + * them and if we add _doc as the first sort by default then sorts will never work.... So we add it here, only if there isn't + * another sort. + */ + List> sorts = mainRequest.getSearchRequest().source().sorts(); + if (sorts == null || sorts.isEmpty()) { + mainRequest.getSearchRequest().source().sort(fieldSort("_doc")); + } + mainRequest.getSearchRequest().source().version(needsSourceDocumentVersions()); } /** * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. + * + * Public for testing.
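The constructor above wires retries in one place: Retry.on(EsRejectedExecutionException.class) with a backoff policy wrapped so every granted delay also bumps the task's retry counter. The same wiring in isolation; the 50ms initial delay and 8 retries are illustrative assumptions, not the defaults, and the import paths follow the new byscroll package introduced by this change:

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.action.bulk.byscroll.WorkingBulkByScrollTask;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

class RetryWiringSketch {
    static Retry buildBulkRetry(WorkingBulkByScrollTask task) {
        BackoffPolicy backoff = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8);
        // wrap() runs the callback before handing out each delay, which is how
        // task::countBulkRetry gets incremented on every bulk retry.
        return Retry.on(EsRejectedExecutionException.class)
                    .policy(BackoffPolicy.wrap(backoff, task::countBulkRetry));
    }
}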
*/ - protected BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { + public BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { // The default script applier executes a no-op return (request, searchHit) -> request; } - @Override - protected BulkRequest buildBulk(Iterable docs) { - BulkRequest bulkRequest = new BulkRequest(); - for (ScrollableHitSource.Hit doc : docs) { - if (accept(doc)) { - RequestWrapper request = scriptApplier.apply(copyMetadata(buildRequest(doc), doc), doc); - if (request != null) { - bulkRequest.add(request.self()); - } - } - } - return bulkRequest; - } - /** - * Used to accept or ignore a search hit. Ignored search hits will be excluded - * from the bulk request. It is also where we fail on invalid search hits, like - * when the document has no source but it's required. + * Does this operation need the versions of the source documents? */ - protected boolean accept(ScrollableHitSource.Hit doc) { - if (doc.getSource() == null) { - /* - * Either the document didn't store _source or we didn't fetch it for some reason. Since we don't allow the user to - * change the "fields" part of the search request it is unlikely that we got here because we didn't fetch _source. - * Thus the error message assumes that it wasn't stored. - */ - throw new IllegalArgumentException("[" + doc.getIndex() + "][" + doc.getType() + "][" + doc.getId() + "] didn't store _source"); - } - return true; - } + protected abstract boolean needsSourceDocumentVersions(); /** * Build the {@link RequestWrapper} for a single search hit. This shouldn't handle @@ -140,10 +179,328 @@ public abstract class AbstractAsyncBulkIndexByScrollAction docs) { + BulkRequest bulkRequest = new BulkRequest(); + for (ScrollableHitSource.Hit doc : docs) { + if (accept(doc)) { + RequestWrapper request = scriptApplier.apply(copyMetadata(buildRequest(doc), doc), doc); + if (request != null) { + bulkRequest.add(request.self()); + } + } + } + return bulkRequest; + } + + protected ScrollableHitSource buildScrollableResultSource(BackoffPolicy backoffPolicy) { + return new ClientScrollableHitSource(logger, backoffPolicy, threadPool, task::countSearchRetry, this::finishHim, client, + mainRequest.getSearchRequest()); + } + + /** + * Build the response for reindex actions. + */ + protected BulkByScrollResponse buildResponse(TimeValue took, List indexingFailures, + List searchFailures, boolean timedOut) { + return new BulkByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); + } + + /** + * Start the action by firing the initial search request. + */ + public void start() { + if (task.isCancelled()) { + finishHim(null); + return; + } + try { + startTime.set(System.nanoTime()); + scrollSource.start(response -> onScrollResponse(timeValueNanos(System.nanoTime()), 0, response)); + } catch (Exception e) { + finishHim(e); + } + } + + /** + * Process a scroll response. + * @param lastBatchStartTime the time when the last batch started. Used to calculate the throttling delay. + * @param lastBatchSize the size of the last batch. Used to calculate the throttling delay. + * @param response the scroll response to process + */ + void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, ScrollableHitSource.Response response) { + if (task.isCancelled()) { + finishHim(null); + return; + } + if ( // If any of the shards failed that should abort the request. 
+ (response.getFailures().size() > 0) + // Timeouts aren't shard failures but we still need to pass them back to the user. + || response.isTimedOut() + ) { + refreshAndFinish(emptyList(), response.getFailures(), response.isTimedOut()); + return; + } + long total = response.getTotalHits(); + if (mainRequest.getSize() > 0) { + total = min(total, mainRequest.getSize()); + } + task.setTotal(total); + AbstractRunnable prepareBulkRequestRunnable = new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + /* + * It is important that the batch start time be calculated from here, scroll response to scroll response. That way the time + * waiting on the scroll doesn't count against this batch in the throttle. + */ + prepareBulkRequest(timeValueNanos(System.nanoTime()), response); + } + + @Override + public void onFailure(Exception e) { + finishHim(e); + } + }; + prepareBulkRequestRunnable = (AbstractRunnable) threadPool.getThreadContext().preserveContext(prepareBulkRequestRunnable); + task.delayPrepareBulkRequest(threadPool, lastBatchStartTime, lastBatchSize, prepareBulkRequestRunnable); + } + + /** + * Prepare the bulk request. Called on the generic thread pool after some preflight checks have been done on the SearchResponse and any + * throttling delay has elapsed. Uses the generic thread pool because reindex is rare enough not to need its own thread pool and because the + * thread may be blocked by the user script. + */ + void prepareBulkRequest(TimeValue thisBatchStartTime, ScrollableHitSource.Response response) { + if (task.isCancelled()) { + finishHim(null); + return; + } + if (response.getHits().isEmpty()) { + refreshAndFinish(emptyList(), emptyList(), false); + return; + } + task.countBatch(); + List hits = response.getHits(); + if (mainRequest.getSize() != SIZE_ALL_MATCHES) { + // Truncate the hits if we have more than the request size + long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed()); + if (remaining < hits.size()) { + hits = hits.subList(0, (int) remaining); + } + } + BulkRequest request = buildBulk(hits); + if (request.requests().isEmpty()) { + /* + * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation. + */ + startNextScroll(thisBatchStartTime, 0); + return; + } + request.timeout(mainRequest.getTimeout()); + request.waitForActiveShards(mainRequest.getWaitForActiveShards()); + if (logger.isDebugEnabled()) { + logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(), + new ByteSizeValue(request.estimatedSizeInBytes())); + } + sendBulkRequest(thisBatchStartTime, request); + } + + /** + * Send a bulk request, handling retries. + */ + void sendBulkRequest(TimeValue thisBatchStartTime, BulkRequest request) { + if (task.isCancelled()) { + finishHim(null); + return; + } + bulkRetry.withAsyncBackoff(client, request, new ActionListener() { + @Override + public void onResponse(BulkResponse response) { + onBulkResponse(thisBatchStartTime, response); + } + + @Override + public void onFailure(Exception e) { + finishHim(e); + } + }); + } + + /** + * Processes bulk responses, accounting for failures.
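The bulkRetry handle that sendBulkRequest uses above is assembled once in the constructor. A sketch of that wiring; the initial delay and retry count here are illustrative, the real values come from the request via buildBackoffPolicy():

---------------------------------------------------------------------------
static Retry buildBulkRetry(WorkingBulkByScrollTask task) {
    // Retry bulk rejections with exponential backoff, counting each retry on the task
    BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(500), 8);
    return Retry.on(EsRejectedExecutionException.class)
            .policy(BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry));
}
---------------------------------------------------------------------------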
+ */ + void onBulkResponse(TimeValue thisBatchStartTime, BulkResponse response) { + try { + List failures = new ArrayList(); + Set destinationIndicesThisBatch = new HashSet<>(); + for (BulkItemResponse item : response) { + if (item.isFailed()) { + recordFailure(item.getFailure(), failures); + continue; + } + switch (item.getOpType()) { + case CREATE: + case INDEX: + if (item.getResponse().getResult() == DocWriteResponse.Result.CREATED) { + task.countCreated(); + } else { + task.countUpdated(); + } + break; + case UPDATE: + task.countUpdated(); + break; + case DELETE: + task.countDeleted(); + break; + } + // Track the indexes we've seen so we can refresh them if requested + destinationIndicesThisBatch.add(item.getIndex()); + } + + if (task.isCancelled()) { + finishHim(null); + return; + } + + addDestinationIndices(destinationIndicesThisBatch); + + if (false == failures.isEmpty()) { + refreshAndFinish(unmodifiableList(failures), emptyList(), false); + return; + } + + if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) { + // We've processed all the requested docs. + refreshAndFinish(emptyList(), emptyList(), false); + return; + } + + startNextScroll(thisBatchStartTime, response.getItems().length); + } catch (Exception t) { + finishHim(t); + } + } + + /** + * Start the next scroll request. + * + * @param lastBatchSize the number of requests sent in the last batch. This is used to calculate the throttling values which are applied + * when the scroll returns + */ + void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) { + if (task.isCancelled()) { + finishHim(null); + return; + } + TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize); + scrollSource.startNextScroll(extraKeepAlive, response -> { + onScrollResponse(lastBatchStartTime, lastBatchSize, response); + }); + } + + private void recordFailure(Failure failure, List failures) { + if (failure.getStatus() == CONFLICT) { + task.countVersionConflict(); + if (false == mainRequest.isAbortOnVersionConflict()) { + return; + } + } + failures.add(failure); + } + + /** + * Start terminating a request that finished non-catastrophically by refreshing the modified indices and then proceeding to + * {@link #finishHim(Exception, List, List, boolean)}. + */ + void refreshAndFinish(List indexingFailures, List searchFailures, boolean timedOut) { + if (task.isCancelled() || false == mainRequest.isRefresh() || destinationIndices.isEmpty()) { + finishHim(null, indexingFailures, searchFailures, timedOut); + return; + } + RefreshRequest refresh = new RefreshRequest(); + refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()])); + client.admin().indices().refresh(refresh, new ActionListener() { + @Override + public void onResponse(RefreshResponse response) { + finishHim(null, indexingFailures, searchFailures, timedOut); + } + + @Override + public void onFailure(Exception e) { + finishHim(e); + } + }); + } + + /** + * Finish the request. + * + * @param failure if non null then the request failed catastrophically with this exception + */ + protected void finishHim(Exception failure) { + finishHim(failure, emptyList(), emptyList(), false); + } + + /** + * Finish the request. 
+ * @param failure if non null then the request failed catastrophically with this exception + * @param indexingFailures any indexing failures accumulated during the request + * @param searchFailures any search failures accumulated during the request + * @param timedOut have any of the sub-requests timed out? + */ + protected void finishHim(Exception failure, List indexingFailures, List searchFailures, boolean timedOut) { + scrollSource.close(); + if (failure == null) { + listener.onResponse( + buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures, timedOut)); + } else { + listener.onFailure(failure); + } + } + + /** + * Get the backoff policy for use with retries. + */ + BackoffPolicy buildBackoffPolicy() { + return exponentialBackoff(mainRequest.getRetryBackoffInitialTime(), mainRequest.getMaxRetries()); + } + + /** + * Add to the list of indices that were modified by this request. This is the list of indices refreshed at the end of the request if the + * request asks for a refresh. + */ + void addDestinationIndices(Collection indices) { + destinationIndices.addAll(indices); + } + + /** + * Set the last returned scrollId. Exists entirely for testing. + */ + void setScroll(String scroll) { + scrollSource.setScroll(scroll); + } + /** * Wrapper for the {@link DocWriteRequest} that are used in this action class. */ - interface RequestWrapper> { + public interface RequestWrapper> { void setIndex(String index); @@ -273,7 +630,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction wrap(IndexRequest request) { + public static RequestWrapper wrap(IndexRequest request) { return new IndexRequestWrapper(request); } @@ -404,7 +761,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, Self extends AbstractBulkByScrollRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { private final SearchRequestBuilder source; protected AbstractBulkByScrollRequestBuilder(ElasticsearchClient client, - Action action, SearchRequestBuilder source, Request request) { + Action action, SearchRequestBuilder source, Request request) { super(client, action, request); this.source = source; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java new file mode 100644 index 00000000000..cdcfb754fb6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AsyncDeleteByQueryAction.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.bulk.byscroll; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.client.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; + +/** + * Implementation of delete-by-query using scrolling and bulk. + */ +public class AsyncDeleteByQueryAction extends AbstractAsyncBulkByScrollAction { + public AsyncDeleteByQueryAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, DeleteByQueryRequest request, ScriptService scriptService, ClusterState clusterState, + ActionListener listener) { + super(task, logger, client, threadPool, request, scriptService, clusterState, listener); + } + + @Override + protected boolean needsSourceDocumentVersions() { + /* + * We always need the version of the source document so we can report a version conflict if we try to delete it and it has been + * changed. + */ + return true; + } + + @Override + protected boolean accept(ScrollableHitSource.Hit doc) { + // Delete-by-query does not require the source to delete a document + // and the default implementation checks for it + return true; + } + + @Override + protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { + DeleteRequest delete = new DeleteRequest(); + delete.index(doc.getIndex()); + delete.type(doc.getType()); + delete.id(doc.getId()); + delete.version(doc.getVersion()); + return wrap(delete); + } + + /** + * Overrides the parent's implementation because it is much more Update/Reindex oriented and so also copies things like timestamp/ttl, + * which we don't care about for a deletion. + */ + @Override + protected RequestWrapper copyMetadata(RequestWrapper request, ScrollableHitSource.Hit doc) { + request.setParent(doc.getParent()); + request.setRouting(doc.getRouting()); + return request; + } + +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java similarity index 86% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java index b2dbd51f381..f2bd62c2335 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollParallelizationHelper.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -32,24 +32,21 @@ import org.elasticsearch.tasks.TaskManager; /** * Helps parallelize reindex requests using sliced scrolls.
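A hypothetical driver for the new class, to illustrate the lifecycle; the surrounding wiring (task, client, listener, and so on) would normally be supplied by a transport action:

---------------------------------------------------------------------------
static void deleteByQueryExample(WorkingBulkByScrollTask task, Logger logger,
        ParentTaskAssigningClient client, ThreadPool threadPool, ScriptService scriptService,
        ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
    SearchRequest search = new SearchRequest("my-index");
    search.source(new SearchSourceBuilder().query(QueryBuilders.termQuery("user", "kimchy")));
    DeleteByQueryRequest request = new DeleteByQueryRequest(search);

    AsyncDeleteByQueryAction action = new AsyncDeleteByQueryAction(
            task, logger, client, threadPool, request, scriptService, clusterState, listener);
    action.start(); // fires the initial search; everything afterwards is callback driven
}
---------------------------------------------------------------------------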
*/ -public class ReindexParallelizationHelper { - private ReindexParallelizationHelper() {} +public class BulkByScrollParallelizationHelper { + private BulkByScrollParallelizationHelper() {} public static < Request extends AbstractBulkByScrollRequest - > void startSlices(Client client, TaskManager taskManager, Action action, - String localNodeId, ParentBulkByScrollTask task, Request request, ActionListener listener) { + > void startSlices(Client client, TaskManager taskManager, Action action, + String localNodeId, ParentBulkByScrollTask task, Request request, ActionListener listener) { TaskId parentTaskId = new TaskId(localNodeId, task.getId()); for (final SearchRequest slice : sliceIntoSubRequests(request.getSearchRequest(), UidFieldMapper.NAME, request.getSlices())) { // TODO move the request to the correct node. maybe here or somehow do it as part of startup for reindex in general.... Request requestForSlice = request.forSlice(parentTaskId, slice); - ActionListener sliceListener = ActionListener.wrap( + ActionListener sliceListener = ActionListener.wrap( r -> task.onSliceResponse(listener, slice.source().slice().getId(), r), e -> task.onSliceFailure(listener, slice.source().slice().getId(), e)); client.execute(action, requestForSlice, sliceListener); - /* Explicitly tell the task manager that we're running child tasks on the local node so it will cancel them when the parent is - * cancelled. */ - taskManager.registerChildTask(task, localNodeId); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollResponse.java similarity index 91% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollResponse.java index 1574a167108..20f2cf2ed06 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollResponse.java @@ -17,17 +17,17 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import java.io.IOException; import java.util.ArrayList; @@ -41,17 +41,17 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; /** * Response used for actions that index many documents using a scroll request. 
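The startSlices loop above leans on sliceIntoSubRequests: conceptually, every child search is a copy of the original whose source carries one slice of N. A sketch under that assumption, using SearchSourceBuilder#copyWithNewSlice, which the helper uses internally:

---------------------------------------------------------------------------
static SearchSourceBuilder sliceOf(SearchRequest original, int id, int max) {
    // e.g. sliceOf(request, 0, 2) and sliceOf(request, 1, 2) partition the scroll by _uid
    return original.source().copyWithNewSlice(new SliceBuilder(UidFieldMapper.NAME, id, max));
}
---------------------------------------------------------------------------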
*/ -public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent { +public class BulkByScrollResponse extends ActionResponse implements ToXContent { private TimeValue took; private BulkByScrollTask.Status status; private List bulkFailures; private List searchFailures; private boolean timedOut; - public BulkIndexByScrollResponse() { + public BulkByScrollResponse() { } - public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List bulkFailures, + public BulkByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List bulkFailures, List searchFailures, boolean timedOut) { this.took = took; this.status = requireNonNull(status, "Null status not supported"); @@ -60,12 +60,12 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont this.timedOut = timedOut; } - public BulkIndexByScrollResponse(Iterable toMerge, @Nullable String reasonCancelled) { + public BulkByScrollResponse(Iterable toMerge, @Nullable String reasonCancelled) { long mergedTook = 0; List statuses = new ArrayList<>(); bulkFailures = new ArrayList<>(); searchFailures = new ArrayList<>(); - for (BulkIndexByScrollResponse response : toMerge) { + for (BulkByScrollResponse response : toMerge) { mergedTook = max(mergedTook, response.getTook().nanos()); statuses.add(new BulkByScrollTask.StatusOrException(response.status)); bulkFailures.addAll(response.getBulkFailures()); @@ -80,7 +80,7 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont return took; } - protected BulkByScrollTask.Status getStatus() { + public BulkByScrollTask.Status getStatus() { return status; } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java similarity index 97% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java index 7de4f19339f..7c9124057b3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/BulkByScrollTask.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -51,15 +51,14 @@ public abstract class BulkByScrollTask extends CancellableTask { /** * The number of sub-slices that are still running. {@link WorkingBulkByScrollTask} will always have 0 and - * {@link ParentBulkByScrollTask} will return the number of waiting tasks. Used by {@link TransportRethrottleAction} to decide how to - * perform the rethrottling. + * {@link ParentBulkByScrollTask} will return the number of waiting tasks. Used to decide how to perform rethrottling. */ - abstract int runningSliceSubTasks(); + public abstract int runningSliceSubTasks(); /** * Apply the {@code newRequestsPerSecond}. */ - abstract void rethrottle(float newRequestsPerSecond); + public abstract void rethrottle(float newRequestsPerSecond); /* * Overridden to force children to return compatible status. 
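The merging constructor shown above is how a parent task folds slice responses into one result: took becomes the maximum across slices and the failure lists are concatenated. A minimal sketch:

---------------------------------------------------------------------------
static BulkByScrollResponse merge(BulkByScrollResponse slice0, BulkByScrollResponse slice1) {
    // null reasonCancelled because neither slice was cancelled
    return new BulkByScrollResponse(Arrays.asList(slice0, slice1), null);
}
---------------------------------------------------------------------------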
@@ -71,6 +70,11 @@ public abstract class BulkByScrollTask extends CancellableTask { */ public abstract TaskInfo getInfoGivenSliceInfo(String localNodeId, List sliceInfo); + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + public static class Status implements Task.Status, SuccessfullyProcessed { public static final String NAME = "bulk-by-scroll"; @@ -486,7 +490,7 @@ public abstract class BulkByScrollTask extends CancellableTask { status.toXContent(builder, params); } else { builder.startObject(); - ElasticsearchException.toXContent(builder, params, exception); + ElasticsearchException.generateThrowableXContent(builder, params, exception); builder.endObject(); } return builder; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java similarity index 95% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java index 7b6be85140f..4f2aefc1011 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -37,6 +37,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.search.SearchHit; @@ -88,11 +90,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { } @Override - public void clearScroll(String scrollId) { - /* - * Fire off the clear scroll but don't wait for it it return before - * we send the use their response. - */ + public void clearScroll(String scrollId, Runnable onCompletion) { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(scrollId); /* @@ -103,15 +101,22 @@ public class ClientScrollableHitSource extends ScrollableHitSource { @Override public void onResponse(ClearScrollResponse response) { logger.debug("Freed [{}] contexts", response.getNumFreed()); + onCompletion.run(); } @Override public void onFailure(Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); + onCompletion.run(); } }); } + @Override + protected void cleanup() { + // Nothing to do + } + /** * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by * rejected execution. @@ -182,7 +187,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { } else { failures = new ArrayList<>(response.getShardFailures().length); for (ShardSearchFailure failure: response.getShardFailures()) { - String nodeId = failure.shard() == null ?
null : failure.shard().nodeId(); + String nodeId = failure.shard() == null ? null : failure.shard().getNodeId(); failures.add(new SearchFailure(failure.getCause(), failure.index(), failure.shardId(), nodeId)); } } @@ -229,6 +234,10 @@ public class ClientScrollableHitSource extends ScrollableHitSource { return source; } + @Override + public XContentType getXContentType() { + return XContentFactory.xContentType(source); + } @Override public long getVersion() { return delegate.getVersion(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java similarity index 97% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java index cde5fea926c..4c261f471f2 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -79,7 +79,7 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest> results; + private final AtomicArray> results; private final AtomicInteger counter; public ParentBulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId, int slices) { @@ -50,7 +50,7 @@ class ParentBulkByScrollTask extends BulkByScrollTask { } @Override - void rethrottle(float newRequestsPerSecond) { + public void rethrottle(float newRequestsPerSecond) { // Nothing to do because all rethrottling is done on slice sub tasks. } @@ -63,7 +63,7 @@ class ParentBulkByScrollTask extends BulkByScrollTask { } @Override - int runningSliceSubTasks() { + public int runningSliceSubTasks() { return counter.get(); } @@ -82,7 +82,7 @@ class ParentBulkByScrollTask extends BulkByScrollTask { } private void addResultsToList(List sliceStatuses) { - for (AtomicArray.Entry> t : results.asList()) { + for (AtomicArray.Entry> t : results.asList()) { if (t.value != null) { if (t.value.v1() != null) { sliceStatuses.set(t.index, new StatusOrException(t.value.v1().getStatus())); @@ -96,7 +96,7 @@ class ParentBulkByScrollTask extends BulkByScrollTask { /** * Record a response from a slice and respond to the listener if the request is finished. */ - void onSliceResponse(ActionListener listener, int sliceId, BulkIndexByScrollResponse response) { + public void onSliceResponse(ActionListener listener, int sliceId, BulkByScrollResponse response) { results.setOnce(sliceId, new Tuple<>(response, null)); /* If the request isn't finished we could automatically rethrottle the sub-requests here but we would only want to do that if we * were fairly sure they had a while left to go. */ @@ -106,19 +106,19 @@ class ParentBulkByScrollTask extends BulkByScrollTask { /** * Record a failure from a slice and respond to the listener if the request is finished. */ - void onSliceFailure(ActionListener listener, int sliceId, Exception e) { + void onSliceFailure(ActionListener listener, int sliceId, Exception e) { results.setOnce(sliceId, new Tuple<>(null, e)); recordSliceCompletionAndRespondIfAllDone(listener); // TODO cancel when a slice fails? 
} - private void recordSliceCompletionAndRespondIfAllDone(ActionListener listener) { + private void recordSliceCompletionAndRespondIfAllDone(ActionListener listener) { if (counter.decrementAndGet() != 0) { return; } - List responses = new ArrayList<>(results.length()); + List responses = new ArrayList<>(results.length()); Exception exception = null; - for (AtomicArray.Entry> t : results.asList()) { + for (AtomicArray.Entry> t : results.asList()) { if (t.value.v1() == null) { assert t.value.v2() != null : "exception shouldn't be null if value is null"; if (exception == null) { @@ -132,7 +132,7 @@ class ParentBulkByScrollTask extends BulkByScrollTask { } } if (exception == null) { - listener.onResponse(new BulkIndexByScrollResponse(responses, getReasonCancelled())); + listener.onResponse(new BulkByScrollResponse(responses, getReasonCancelled())); } else { listener.onFailure(exception); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java similarity index 89% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java index bf13d6d72e2..73aa6536986 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ScrollableHitSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -32,7 +32,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.threadpool.ThreadPool; @@ -81,15 +81,26 @@ public abstract class ScrollableHitSource implements Closeable { }); } protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse); - + @Override - public void close() { + public final void close() { String scrollId = this.scrollId.get(); if (Strings.hasLength(scrollId)) { - clearScroll(scrollId); + clearScroll(scrollId, this::cleanup); + } else { + cleanup(); } } - protected abstract void clearScroll(String scrollId); + /** + * Called to clear a scroll id. + * @param scrollId the id to clear + * @param onCompletion implementers must call this after completing the clear whether they are successful or not + */ + protected abstract void clearScroll(String scrollId, Runnable onCompletion); + /** + * Called after the process has been totally finished to clean up any resources the process needed like remote connections. + */ + protected abstract void cleanup(); /** * Set the id of the last scroll. Used for debugging. @@ -179,6 +190,10 @@ public abstract class ScrollableHitSource implements Closeable { * all. */ @Nullable BytesReference getSource(); + /** + * The content type of the hit source. Returns null if the source didn't come back from the search. 
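The getXContentType() accessor declared just below lets consumers forward a hit's source without sniffing the bytes, for example when building an index request with the source(BytesReference, XContentType) overload added later in this change. An illustrative sketch:

---------------------------------------------------------------------------
static IndexRequest toIndexRequest(ScrollableHitSource.Hit hit) {
    IndexRequest index = new IndexRequest("dest-index", hit.getType(), hit.getId());
    index.source(hit.getSource(), hit.getXContentType()); // no content-type sniffing
    return index;
}
---------------------------------------------------------------------------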
+ */ + @Nullable XContentType getXContentType(); /** * The document id of the parent of the hit if there is a parent or null if there isn't. */ @@ -190,8 +205,7 @@ public abstract class ScrollableHitSource implements Closeable { } /** - * An implementation of {@linkplain Hit} that uses getters and setters. Primarily used for testing and {@link RemoteScrollableHitSource} - * . + * An implementation of {@linkplain Hit} that uses getters and setters. */ public static class BasicHit implements Hit { private final String index; @@ -200,6 +214,7 @@ public abstract class ScrollableHitSource implements Closeable { private final long version; private BytesReference source; + private XContentType xContentType; private String parent; private String routing; @@ -235,8 +250,14 @@ public abstract class ScrollableHitSource implements Closeable { return source; } - public BasicHit setSource(BytesReference source) { + @Override + public XContentType getXContentType() { + return xContentType; + } + + public BasicHit setSource(BytesReference source, XContentType xContentType) { this.source = source; + this.xContentType = xContentType; return this; } @@ -337,7 +358,7 @@ public abstract class ScrollableHitSource implements Closeable { builder.field("reason"); { builder.startObject(); - ElasticsearchException.toXContent(builder, params, reason); + ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); } builder.endObject(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java similarity index 96% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java index 6547984900e..a0176e35202 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/SuccessfullyProcessed.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/SuccessfullyProcessed.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; /** * Implemented by {@link BulkByScrollTask} and {@link BulkByScrollTask.Status} to consistently implement diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java similarity index 98% rename from modules/reindex/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java rename to core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java index 3e09ee2d7fd..1b458caa3d5 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.reindex; +package org.elasticsearch.action.bulk.byscroll; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -95,7 +95,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success } @Override - int runningSliceSubTasks() { + public int runningSliceSubTasks() { return 0; } @@ -162,7 +162,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success bulkRetries.incrementAndGet(); } - void countSearchRetry() { + public void countSearchRetry() { searchRetries.incrementAndGet(); } @@ -209,7 +209,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success } @Override - void rethrottle(float newRequestsPerSecond) { + public void rethrottle(float newRequestsPerSecond) { synchronized (delayedPrepareBulkRequestReference) { if (logger.isDebugEnabled()) { logger.debug("[{}]: Rethrottling to [{}] requests per second", getId(), newRequestsPerSecond); @@ -306,7 +306,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success private final AtomicBoolean hasRun = new AtomicBoolean(false); private final AbstractRunnable delegate; - public RunOnce(AbstractRunnable delegate) { + RunOnce(AbstractRunnable delegate) { this.delegate = delegate; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/package-info.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/package-info.java new file mode 100644 index 00000000000..3a31ea2f654 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Infrastructure for actions that modify documents based on the results of a scrolling query. 
+ */ +package org.elasticsearch.action.bulk.byscroll; diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 280324227cc..72d8c4e5857 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.Nullable; @@ -43,7 +44,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { private String type; private String id; diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 0f4eb897d83..9ab06a941eb 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -20,12 +20,20 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + /** * The response of the delete action. * @@ -34,6 +42,8 @@ import java.io.IOException; */ public class DeleteResponse extends DocWriteResponse { + private static final String FOUND = "found"; + public DeleteResponse() { } @@ -48,12 +58,36 @@ public class DeleteResponse extends DocWriteResponse { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("found", result == Result.DELETED); - super.toXContent(builder, params); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(FOUND, result == Result.DELETED); + super.innerToXContent(builder, params); return builder; } + private static final ConstructingObjectParser PARSER; + static { + PARSER = new ConstructingObjectParser<>(DeleteResponse.class.getName(), + args -> { + // index uuid and shard id are unknown and can't be parsed back for now. + ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1); + String type = (String) args[1]; + String id = (String) args[2]; + long version = (long) args[3]; + ShardInfo shardInfo = (ShardInfo) args[5]; + long seqNo = (args[6] != null) ? 
(long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO; + boolean found = (boolean) args[7]; + DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found); + deleteResponse.setShardInfo(shardInfo); + return deleteResponse; + }); + DocWriteResponse.declareParserFields(PARSER); + PARSER.declareBoolean(constructorArg(), new ParseField(FOUND)); + } + + public static DeleteResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 0d0d76c7691..3aaf4a472fa 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -19,147 +19,39 @@ package org.elasticsearch.action.delete; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.RoutingMissingException; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.bulk.TransportBulkAction; +import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportWriteAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. 
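The ConstructingObjectParser registered above makes delete responses round-trippable from their REST bodies. A hedged sketch; jsonParser(...) is a hypothetical helper standing in for XContentParser construction, whose factory signature varies across 5.x:

---------------------------------------------------------------------------
static void parseDeleteResponseExample() throws IOException {
    // jsonParser(...) is a hypothetical stand-in, not an Elasticsearch API
    XContentParser parser = jsonParser("{\"found\":true,\"_index\":\"idx\",\"_type\":\"doc\","
            + "\"_id\":\"1\",\"_version\":2,\"result\":\"deleted\","
            + "\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0}}");
    DeleteResponse parsed = DeleteResponse.fromXContent(parser);
    assert parsed.getResult() == DocWriteResponse.Result.DELETED;
}
---------------------------------------------------------------------------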
+ * + * Deprecated: use TransportBulkAction with a single item instead. */ -public class TransportDeleteAction extends TransportWriteAction { - - private final AutoCreateIndex autoCreateIndex; - private final TransportCreateIndexAction createIndexAction; +@Deprecated +public class TransportDeleteAction extends TransportSingleItemBulkWriteAction { @Inject public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - TransportCreateIndexAction createIndexAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - AutoCreateIndex autoCreateIndex) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX); - this.createIndexAction = createIndexAction; - this.autoCreateIndex = autoCreateIndex; - } - - @Override - protected void doExecute(Task task, final DeleteRequest request, final ActionListener listener) { - ClusterState state = clusterService.state(); - if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - CreateIndexRequest createIndexRequest = new CreateIndexRequest() - .index(request.index()) - .cause("auto(delete api)") - .masterNodeTimeout(request.timeout()); - createIndexAction.execute(task, createIndexRequest, new ActionListener() { - @Override - public void onResponse(CreateIndexResponse result) { - innerExecute(task, request, listener); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { - // we have the index, do it - innerExecute(task, request, listener); - } else { - listener.onFailure(e); - } - } - }); - } else { - innerExecute(task, request, listener); - } - } - - @Override - protected void resolveRequest(final MetaData metaData, IndexMetaData indexMetaData, DeleteRequest request) { - super.resolveRequest(metaData, indexMetaData, request); - resolveAndValidateRouting(metaData, indexMetaData.getIndex().getName(), request); - ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), - indexMetaData.getIndex().getName(), request.id(), request.routing()); - request.setShardId(shardId); - } - - public static void resolveAndValidateRouting(final MetaData metaData, final String concreteIndex, - DeleteRequest request) { - request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index())); - // check if routing is required, if so, throw error if routing wasn't specified - if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { - throw new RoutingMissingException(concreteIndex, request.type(), request.id()); - } - } - - private void innerExecute(Task task, final DeleteRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { + super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, + actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX, + bulkAction, shardBulkAction); } @Override protected DeleteResponse newResponseInstance() { return new
DeleteResponse(); } - - @Override - protected WritePrimaryResult shardOperationOnPrimary(DeleteRequest request, IndexShard primary) throws Exception { - final Engine.DeleteResult result = executeDeleteRequestOnPrimary(request, primary); - final DeleteResponse response; - if (result.hasFailure() == false) { - // update the request with the version so it will go to the replicas - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - request.version(result.getVersion()); - request.seqNo(result.getSeqNo()); - assert request.versionType().validateVersionForWrites(request.version()); - response = new DeleteResponse( - primary.shardId(), - request.type(), - request.id(), - result.getSeqNo(), - result.getVersion(), - result.isFound()); - } else { - response = null; - } - return new WritePrimaryResult(request, response, result.getTranslogLocation(), result.getFailure(), primary); - } - - @Override - protected WriteReplicaResult shardOperationOnReplica(DeleteRequest request, IndexShard replica) throws Exception { - final Engine.DeleteResult result = executeDeleteRequestOnReplica(request, replica); - return new WriteReplicaResult(request, result.getTranslogLocation(), result.getFailure(), replica); - } - - - public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) { - final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); - return primary.delete(delete); - } - - public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) { - final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(), - request.seqNo(), request.primaryTerm(), request.version(), request.versionType()); - return replica.delete(delete); - } - } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java index 9cc89095057..6b7a8cd7202 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java @@ -124,6 +124,8 @@ public abstract class FieldStats implements Writeable, ToXContent { return "string"; case 4: return "ip"; + case 5: + return "geo_point"; default: throw new IllegalArgumentException("Unknown type."); } @@ -276,7 +278,7 @@ public abstract class FieldStats implements Writeable, ToXContent { } } - private void updateMinMax(T min, T max) { + protected void updateMinMax(T min, T max) { if (compare(minValue, min) > 0) { minValue = min; } @@ -327,6 +329,8 @@ public abstract class FieldStats implements Writeable, ToXContent { writeMinMax(out); } } else { + assert hasMinMax : "cannot serialize null min/max fieldstats in a mixed-cluster " + + "with pre-" + Version.V_5_2_0_UNRELEASED + " nodes, remote version [" + out.getVersion() + "]"; writeMinMax(out); } } @@ -643,6 +647,55 @@ public abstract class FieldStats implements Writeable, ToXContent { } } + public static class GeoPoint extends FieldStats { + public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, + boolean isSearchable, boolean isAggregatable) { + super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, + isSearchable, isAggregatable); + } + + public GeoPoint(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, + boolean isSearchable, boolean isAggregatable, + org.elasticsearch.common.geo.GeoPoint 
minValue, org.elasticsearch.common.geo.GeoPoint maxValue) { + super((byte) 5, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, + minValue, maxValue); + } + + @Override + public org.elasticsearch.common.geo.GeoPoint valueOf(String value, String fmt) { + return org.elasticsearch.common.geo.GeoPoint.parseFromLatLon(value); + } + + @Override + protected void updateMinMax(org.elasticsearch.common.geo.GeoPoint min, org.elasticsearch.common.geo.GeoPoint max) { + minValue.reset(Math.min(min.lat(), minValue.lat()), Math.min(min.lon(), minValue.lon())); + maxValue.reset(Math.max(max.lat(), maxValue.lat()), Math.max(max.lon(), maxValue.lon())); + } + + @Override + public int compare(org.elasticsearch.common.geo.GeoPoint p1, org.elasticsearch.common.geo.GeoPoint p2) { + throw new IllegalArgumentException("compare is not supported for geo_point field stats"); + } + + @Override + public void writeMinMax(StreamOutput out) throws IOException { + out.writeDouble(minValue.lat()); + out.writeDouble(minValue.lon()); + out.writeDouble(maxValue.lat()); + out.writeDouble(maxValue.lon()); + } + + @Override + public String getMinValueAsString() { + return minValue.toString(); + } + + @Override + public String getMaxValueAsString() { + return maxValue.toString(); + } + } + public static FieldStats readFrom(StreamInput in) throws IOException { byte type = in.readByte(); long maxDoc = in.readLong(); @@ -690,7 +743,7 @@ public abstract class FieldStats implements Writeable, ToXContent { isSearchable, isAggregatable); } - case 4: + case 4: { if (hasMinMax == false) { return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable); @@ -705,7 +758,17 @@ public abstract class FieldStats implements Writeable, ToXContent { InetAddress max = InetAddressPoint.decode(b2); return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, min, max); - + } + case 5: { + if (hasMinMax == false) { + return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, + isSearchable, isAggregatable); + } + org.elasticsearch.common.geo.GeoPoint min = new org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble()); + org.elasticsearch.common.geo.GeoPoint max = new org.elasticsearch.common.geo.GeoPoint(in.readDouble(), in.readDouble()); + return new GeoPoint(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, + isSearchable, isAggregatable, min, max); + } default: throw new IllegalArgumentException("Unknown type."); } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java index d94cfcd2958..133a94e69a4 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.fieldstats; +import org.elasticsearch.Version; import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,6 +28,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.util.stream.Collectors; public class FieldStatsShardResponse extends BroadcastShardResponse { @@ -44,6 +46,12 @@ public class FieldStatsShardResponse extends BroadcastShardResponse { return fieldStats; } + Map > 
filterNullMinMax() { + return fieldStats.entrySet().stream() + .filter((e) -> e.getValue().hasMinMax()) + .collect(Collectors.toMap(p -> p.getKey(), p -> p.getValue())); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -59,8 +67,17 @@ public class FieldStatsShardResponse extends BroadcastShardResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(fieldStats.size()); - for (Map.Entry> entry : fieldStats.entrySet()) { + final Map > stats; + if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) { + /** + * FieldStats with null min/max are not (de)serializable in versions prior to {@link Version.V_5_2_0_UNRELEASED} + */ + stats = filterNullMinMax(); + } else { + stats = getFieldStats(); + } + out.writeVInt(stats.size()); + for (Map.Entry> entry : stats.entrySet()) { out.writeString(entry.getKey()); entry.getValue().writeTo(out); } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java index e65f6951432..9ee72223a66 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java @@ -36,8 +36,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; diff --git a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java index 1b347a2d05d..296fbe6610e 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -25,14 +25,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import java.io.IOException; import java.util.Iterator; import java.util.Map; +import java.util.Objects; /** * The response of a get action. 
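The GetResponse changes that follow add equals/hashCode and fromXContent parsing. A sketch of the round trip, with jsonParser(...) again a hypothetical stand-in for parser construction:

---------------------------------------------------------------------------
static void parseGetResponseExample() throws IOException {
    XContentParser parser = jsonParser("{\"_index\":\"idx\",\"_type\":\"doc\",\"_id\":\"1\",\"found\":false}");
    GetResponse response = GetResponse.fromXContent(parser);
    assert response.isExists() == false; // the new equality support makes such checks easy to test
}
---------------------------------------------------------------------------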
@@ -40,9 +42,9 @@ import java.util.Map; * @see GetRequest * @see org.elasticsearch.client.Client#get(GetRequest) */ -public class GetResponse extends ActionResponse implements Iterable, ToXContent { +public class GetResponse extends ActionResponse implements Iterable, ToXContentObject { - private GetResult getResult; + GetResult getResult; GetResponse() { } @@ -156,6 +158,11 @@ public class GetResponse extends ActionResponse implements Iterable, T return getResult.toXContent(builder, params); } + public static GetResponse fromXContent(XContentParser parser) throws IOException { + GetResult getResult = GetResult.fromXContent(parser); + return new GetResponse(getResult); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -168,8 +175,25 @@ public class GetResponse extends ActionResponse implements Iterable, T getResult.writeTo(out); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GetResponse getResponse = (GetResponse) o; + return Objects.equals(getResult, getResponse.getResult); + } + + @Override + public int hashCode() { + return Objects.hash(getResult); + } + @Override public String toString() { - return Strings.toString(this, true); + return Strings.toString(this); } } diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 5407184ded3..d4627391b11 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -379,7 +379,8 @@ public class MultiGetRequest extends ActionRequest implements Iterable, ToXContent { +public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { /** * Represents a failure. @@ -128,6 +128,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable implements DocWriteRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { private String type; private String id; @@ -82,7 +84,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; - private XContentType contentType = Requests.INDEX_CONTENT_TYPE; + private XContentType contentType; private String pipeline; @@ -102,7 +104,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** * Constructs a new index request against the specific index. The {@link #type(String)} - * {@link #source(byte[])} must be set. + * {@link #source(byte[], XContentType)} must be set. */ public IndexRequest(String index) { this.index = index; @@ -139,7 +141,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement if (source == null) { validationException = addValidationError("source is missing", validationException); } - + if (contentType == null) { + validationException = addValidationError("content type is missing", validationException); + } final long resolvedVersion = resolveVersionDefaults(); if (opType() == OpType.CREATE) { if (versionType != VersionType.INTERNAL) { @@ -178,20 +182,13 @@ public class IndexRequest extends ReplicatedWriteRequest implement } /** - * The content type that will be used when generating a document from user provided objects like Maps. + * The content type. 
This will be used when generating a document from user provided objects like Maps and when parsing the + * source at index time */ public XContentType getContentType() { return contentType; } - /** - * Sets the content type that will be used when generating a document from user provided objects (like Map). - */ - public IndexRequest contentType(XContentType contentType) { - this.contentType = contentType; - return this; - } - /** * The type of the indexed document. */ @@ -283,16 +280,16 @@ public class IndexRequest extends ReplicatedWriteRequest implement } public Map sourceAsMap() { - return XContentHelper.convertToMap(source, false).v2(); + return XContentHelper.convertToMap(source, false, contentType).v2(); } /** - * Index the Map as a {@link org.elasticsearch.client.Requests#INDEX_CONTENT_TYPE}. + * Index the Map in {@link Requests#INDEX_CONTENT_TYPE} format * * @param source The map to index */ public IndexRequest source(Map source) throws ElasticsearchGenerationException { - return source(source, contentType); + return source(source, Requests.INDEX_CONTENT_TYPE); } /** @@ -313,24 +310,32 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** * Sets the document source to index. * - * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)} - * or using the {@link #source(byte[])}. + * @deprecated use {@link #source(String, XContentType)} */ + @Deprecated public IndexRequest source(String source) { - this.source = new BytesArray(source.getBytes(StandardCharsets.UTF_8)); - return this; + return source(new BytesArray(source), XContentFactory.xContentType(source)); + } + + /** + * Sets the document source to index. + * + * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)} + * or using the {@link #source(byte[], XContentType)}. + */ + public IndexRequest source(String source, XContentType xContentType) { + return source(new BytesArray(source), xContentType); } /** * Sets the content source to index. */ public IndexRequest source(XContentBuilder sourceBuilder) { - source = sourceBuilder.bytes(); - return this; + return source(sourceBuilder.bytes(), sourceBuilder.contentType()); } /** - * Sets the content source to index. + * Sets the content source to index using the default content type ({@link Requests#INDEX_CONTENT_TYPE}) *
<p>
* Note: the number of objects passed to this method must be an even * number. Also the first argument in each pair (the field name) must have a @@ -338,6 +343,18 @@ public class IndexRequest extends ReplicatedWriteRequest implement *
</p>
*/ public IndexRequest source(Object... source) { + return source(Requests.INDEX_CONTENT_TYPE, source); + } + + /** + * Sets the content source to index. + *
<p>
+ * Note: the number of objects passed to this method as varargs must be an even + * number. Also the first argument in each pair (the field name) must have a + * valid String representation. + *
</p>
+ */ + public IndexRequest source(XContentType xContentType, Object... source) { if (source.length % 2 != 0) { throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]"); } @@ -345,7 +362,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement throw new IllegalArgumentException("you are using the removed method for source with bytes and unsafe flag, the unsafe flag was removed, please just use source(BytesReference)"); } try { - XContentBuilder builder = XContentFactory.contentBuilder(contentType); + XContentBuilder builder = XContentFactory.contentBuilder(xContentType); builder.startObject(); for (int i = 0; i < source.length; i++) { builder.field(source[i++].toString(), source[i]); @@ -359,19 +376,53 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** * Sets the document to index in bytes form. + * @deprecated use {@link #source(BytesReference, XContentType)} */ + @Deprecated public IndexRequest source(BytesReference source) { - this.source = source; - return this; + return source(source, XContentFactory.xContentType(source)); + } /** * Sets the document to index in bytes form. */ + public IndexRequest source(BytesReference source, XContentType xContentType) { + this.source = Objects.requireNonNull(source); + this.contentType = Objects.requireNonNull(xContentType); + return this; + } + + /** + * Sets the document to index in bytes form. + * @deprecated use {@link #source(byte[], XContentType)} + */ + @Deprecated public IndexRequest source(byte[] source) { return source(source, 0, source.length); } + /** + * Sets the document to index in bytes form. + */ + public IndexRequest source(byte[] source, XContentType xContentType) { + return source(source, 0, source.length, xContentType); + } + + /** + * Sets the document to index in bytes form (assumed to be safe to be used from different + * threads). + * + * @param source The source to index + * @param offset The offset in the byte array + * @param length The length of the data + * @deprecated use {@link #source(byte[], int, int, XContentType)} + */ + @Deprecated + public IndexRequest source(byte[] source, int offset, int length) { + return source(new BytesArray(source, offset, length), XContentFactory.xContentType(source)); + } + /** * Sets the document to index in bytes form (assumed to be safe to be used from different * threads). 
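Taken together, these overloads move callers from content-type sniffing to an explicit XContentType. A hedged usage sketch (index, type, and id values here are made up):

-------------------------------------------------
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class IndexRequestSourceExamples {
    static void examples() {
        // Preferred: state the content type of the raw source explicitly.
        IndexRequest explicit = new IndexRequest("index", "type", "1")
                .source("{\"user\":\"kimchy\"}", XContentType.JSON);

        // The varargs overload builds the document for you; arguments must be
        // an even number of (field name, value) pairs.
        IndexRequest pairs = new IndexRequest("index", "type", "2")
                .source(XContentType.JSON, "user", "kimchy", "age", 42);

        // Still compiles, but deprecated: the content type is guessed from the bytes.
        IndexRequest guessed = new IndexRequest("index", "type", "3")
                .source("{\"user\":\"kimchy\"}");
    }
}
-------------------------------------------------

PutPipelineRequest and the simulate-pipeline request builder below get the same treatment: a deprecated auto-detecting variant plus a preferred one that takes the XContentType alongside the source bytes.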
@@ -380,9 +431,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement * @param offset The offset in the byte array * @param length The length of the data */ - public IndexRequest source(byte[] source, int offset, int length) { - this.source = new BytesArray(source, offset, length); - return this; + public IndexRequest source(byte[] source, int offset, int length, XContentType xContentType) { + return source(new BytesArray(source, offset, length), xContentType); } /** @@ -514,6 +564,11 @@ public class IndexRequest extends ReplicatedWriteRequest implement pipeline = in.readOptionalString(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); + if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + contentType = in.readOptionalWriteable(XContentType::readFrom); + } else { + contentType = XContentFactory.xContentType(source); + } } @Override @@ -524,7 +579,10 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeOptionalString(routing); out.writeOptionalString(parent); if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { - out.writeOptionalString(null); + // Serialize a fake timestamp. 5.x expect this value to be set by the #process method so we can't use null. + // On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value) for + // the transport layer OK as it will be ignored. + out.writeOptionalString("0"); out.writeOptionalWriteable(null); } out.writeBytesReference(source); @@ -539,6 +597,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeOptionalString(pipeline); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); + if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + out.writeOptionalWriteable(contentType); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index f7df8bffced..7af43ec35ec 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -82,12 +82,22 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder * Note, its preferable to either set it using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)} - * or using the {@link #setSource(byte[])}. + * or using the {@link #setSource(byte[], XContentType)}. + * @deprecated use {@link #setSource(String, XContentType)} */ + @Deprecated public IndexRequestBuilder setSource(String source) { request.source(source); return this; } + /** + * Sets the document source to index. + *
<p>
+ * Note, its preferable to either set it using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)} + * or using the {@link #setSource(byte[], XContentType)}. + */ + public IndexRequestBuilder setSource(String source, XContentType xContentType) { + request.source(source, xContentType); + return this; + } + /** * Sets the content source to index. */ @@ -129,12 +152,22 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder @@ -162,10 +211,15 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder + * Note: the number of objects passed as varargs to this method must be an even + * number. Also the first argument in each pair (the field name) must have a + * valid String representation. + *
</p>
*/ - public IndexRequestBuilder setContentType(XContentType contentType) { - request.contentType(contentType); + public IndexRequestBuilder setSource(XContentType xContentType, Object... source) { + request.source(xContentType, source); return this; } diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java index b092e7e8e74..d2d367152f4 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -20,13 +20,21 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + /** * A response of an index operation, * @@ -35,6 +43,8 @@ import java.io.IOException; */ public class IndexResponse extends DocWriteResponse { + private static final String CREATED = "created"; + public IndexResponse() { } @@ -57,14 +67,45 @@ public class IndexResponse extends DocWriteResponse { builder.append(",version=").append(getVersion()); builder.append(",result=").append(getResult().getLowercase()); builder.append(",seqNo=").append(getSeqNo()); - builder.append(",shards=").append(Strings.toString(getShardInfo(), true)); + builder.append(",shards=").append(Strings.toString(getShardInfo())); return builder.append("]").toString(); } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - super.toXContent(builder, params); - builder.field("created", result == Result.CREATED); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + super.innerToXContent(builder, params); + builder.field(CREATED, result == Result.CREATED); return builder; } + + /** + * ConstructingObjectParser used to parse the {@link IndexResponse}. We use a ObjectParser here + * because most fields are parsed by the parent abstract class {@link DocWriteResponse} and it's + * not easy to parse part of the fields in the parent class and other fields in the children class + * using the usual streamed parsing method. + */ + private static final ConstructingObjectParser PARSER; + static { + PARSER = new ConstructingObjectParser<>(IndexResponse.class.getName(), + args -> { + // index uuid and shard id are unknown and can't be parsed back for now. + ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1); + String type = (String) args[1]; + String id = (String) args[2]; + long version = (long) args[3]; + ShardInfo shardInfo = (ShardInfo) args[5]; + long seqNo = (args[6] != null) ? 
(long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO; + boolean created = (boolean) args[7]; + + IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created); + indexResponse.setShardInfo(shardInfo); + return indexResponse; + }); + DocWriteResponse.declareParserFields(PARSER); + PARSER.declareBoolean(constructorArg(), new ParseField(CREATED)); + } + + public static IndexResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 70220679752..88a210c7180 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -19,39 +19,16 @@ package org.elasticsearch.action.index; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.ingest.IngestActionForwarder; +import org.elasticsearch.action.bulk.TransportBulkAction; +import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportWriteAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -64,202 +41,25 @@ import org.elasticsearch.transport.TransportService; * Defaults to true. *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * + * + * Deprecated use TransportBulkAction with a single item instead */ -public class TransportIndexAction extends TransportWriteAction { - - private final AutoCreateIndex autoCreateIndex; - private final boolean allowIdGeneration; - private final TransportCreateIndexAction createIndexAction; - - private final ClusterService clusterService; - private final IngestService ingestService; - private final MappingUpdatedAction mappingUpdatedAction; - private final IngestActionForwarder ingestForwarder; +@Deprecated +public class TransportIndexAction extends TransportSingleItemBulkWriteAction { @Inject public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, - IndicesService indicesService, IngestService ingestService, ThreadPool threadPool, - ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, - MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) { + IndicesService indicesService, + ThreadPool threadPool, ShardStateAction shardStateAction, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX); - this.mappingUpdatedAction = mappingUpdatedAction; - this.createIndexAction = createIndexAction; - this.autoCreateIndex = autoCreateIndex; - this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); - this.clusterService = clusterService; - this.ingestService = ingestService; - this.ingestForwarder = new IngestActionForwarder(transportService); - clusterService.addStateApplier(this.ingestForwarder); - } - - @Override - protected void doExecute(Task task, final IndexRequest request, final ActionListener listener) { - if (Strings.hasText(request.getPipeline())) { - if (clusterService.localNode().isIngestNode()) { - processIngestIndexRequest(task, request, listener); - } else { - ingestForwarder.forwardIngestRequest(IndexAction.INSTANCE, request, listener); - } - return; - } - // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API - ClusterState state = clusterService.state(); - if (shouldAutoCreate(request, state)) { - CreateIndexRequest createIndexRequest = new CreateIndexRequest(); - createIndexRequest.index(request.index()); - createIndexRequest.cause("auto(index api)"); - createIndexRequest.masterNodeTimeout(request.timeout()); - createIndexAction.execute(task, createIndexRequest, new ActionListener() { - @Override - public void onResponse(CreateIndexResponse result) { - innerExecute(task, request, listener); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { - // we have the index, do it - try { - innerExecute(task, request, listener); - } catch (Exception inner) { - inner.addSuppressed(e); - listener.onFailure(inner); - } - } else { - listener.onFailure(e); - } - } - }); - } else { - innerExecute(task, request, listener); - } - } - - protected boolean shouldAutoCreate(IndexRequest request, ClusterState state) { - return 
autoCreateIndex.shouldAutoCreate(request.index(), state); - } - - @Override - protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, IndexRequest request) { - super.resolveRequest(metaData, indexMetaData, request); - MappingMetaData mappingMd =indexMetaData.mappingOrDefault(request.type()); - request.resolveRouting(metaData); - request.process(mappingMd, allowIdGeneration, indexMetaData.getIndex().getName()); - ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), - indexMetaData.getIndex().getName(), request.id(), request.routing()); - request.setShardId(shardId); - } - - protected void innerExecute(Task task, final IndexRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); + actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, + bulkAction, shardBulkAction); } @Override protected IndexResponse newResponseInstance() { return new IndexResponse(); } - - @Override - protected WritePrimaryResult shardOperationOnPrimary(IndexRequest request, IndexShard primary) throws Exception { - final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, mappingUpdatedAction); - final IndexResponse response; - if (indexResult.hasFailure() == false) { - // update the version on request so it will happen on the replicas - final long version = indexResult.getVersion(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - request.seqNo(indexResult.getSeqNo()); - assert request.versionType().validateVersionForWrites(request.version()); - response = new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getSeqNo(), - indexResult.getVersion(), indexResult.isCreated()); - } else { - response = null; - } - return new WritePrimaryResult(request, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary); - } - - @Override - protected WriteReplicaResult shardOperationOnReplica(IndexRequest request, IndexShard replica) throws Exception { - final Engine.IndexResult indexResult = executeIndexRequestOnReplica(request, replica); - return new WriteReplicaResult(request, indexResult.getTranslogLocation(), indexResult.getFailure(), replica); - } - - /** - * Execute the given {@link IndexRequest} on a replica shard, throwing a - * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
- */ - public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) { - final ShardId shardId = replica.shardId(); - SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source()) - .routing(request.routing()).parent(request.parent()); - - final Engine.Index operation; - try { - operation = replica.prepareIndexOnReplica(sourceToParse, request.seqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); - } catch (MapperParsingException e) { - return new Engine.IndexResult(e, request.version(), request.seqNo()); - } - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); - } - return replica.index(operation); - } - - /** Utility method to prepare an index operation on primary shards */ - static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { - SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source()) - .routing(request.routing()).parent(request.parent()); - return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); - } - - public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, - MappingUpdatedAction mappingUpdatedAction) throws Exception { - Engine.Index operation; - try { - operation = prepareIndexOperationOnPrimary(request, primary); - } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.IndexResult(e, request.version(), request.seqNo()); - } - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = primary.shardId(); - if (update != null) { - // can throw timeout exception when updating mappings or ISE for attempting to update default mappings - // which are bubbled up - try { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - } catch (IllegalArgumentException e) { - // throws IAE on conflicts merging dynamic mappings - return new Engine.IndexResult(e, request.version(), request.seqNo()); - } - try { - operation = prepareIndexOperationOnPrimary(request, primary); - } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.IndexResult(e, request.version(), request.seqNo()); - } - update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new ReplicationOperation.RetryOnPrimaryException(shardId, - "Dynamic mappings are not available on the node that holds the primary yet"); - } - } - - return primary.index(operation); - } - - private void processIngestIndexRequest(Task task, IndexRequest indexRequest, ActionListener listener) { - ingestService.getPipelineExecutionService().executeIndexRequest(indexRequest, t -> { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t); - listener.onFailure(t); - }, success -> { - // TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that - // processes the primary action. 
This could lead to a pipeline being executed twice for the same - // index request, hence we set the pipeline to null once its execution completed. - indexRequest.setPipeline(null); - doExecute(task, indexRequest, listener); - }); - } - } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index 74ce894b053..45cb83634f8 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index f603a354f4b..30843bdff9b 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.StatusToXContent; +import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.rest.RestStatus; @@ -31,7 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -public class GetPipelineResponse extends ActionResponse implements StatusToXContent { +public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject { private List pipelines; @@ -52,7 +52,7 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont int size = in.readVInt(); pipelines = new ArrayList<>(size); for (int i = 0; i < size; i++) { - pipelines.add(PipelineConfiguration.readPipelineConfiguration(in)); + pipelines.add(PipelineConfiguration.readFrom(in)); } } @@ -76,9 +76,11 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); for (PipelineConfiguration pipeline : pipelines) { builder.field(pipeline.getId(), pipeline.getConfigAsMap()); } + builder.endObject(); return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 8bac5c7b804..f64b36d47ae 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 10416146ba8..a28950b24c1 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -19,32 +19,40 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.action.ValidateActions.addValidationError; - public class PutPipelineRequest extends AcknowledgedRequest { private String id; private BytesReference source; + private XContentType xContentType; + /** + * Create a new pipeline request + * @deprecated use {@link #PutPipelineRequest(String, BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated public PutPipelineRequest(String id, BytesReference source) { - if (id == null) { - throw new IllegalArgumentException("id is missing"); - } - if (source == null) { - throw new IllegalArgumentException("source is missing"); - } + this(id, source, XContentFactory.xContentType(source)); + } - this.id = id; - this.source = source; + /** + * Create a new pipeline request with the id and source along with the content type of the source + */ + public PutPipelineRequest(String id, BytesReference source, XContentType xContentType) { + this.id = Objects.requireNonNull(id); + this.source = Objects.requireNonNull(source); + this.xContentType = Objects.requireNonNull(xContentType); } PutPipelineRequest() { @@ -63,11 +71,20 @@ public class PutPipelineRequest extends AcknowledgedRequest return source; } + public XContentType getXContentType() { + return xContentType; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); id = in.readString(); source = in.readBytesReference(); + if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType = XContentType.readFrom(in); + } else { + xContentType = XContentFactory.xContentType(source); + } } @Override @@ -75,5 +92,8 @@ public class PutPipelineRequest extends AcknowledgedRequest super.writeTo(out); out.writeString(id); out.writeBytesReference(source); + if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType.writeTo(out); + } } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java index bd927115fb5..c03b3b84f8b 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java @@ -22,6 +22,7 @@ package 
org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; public class PutPipelineRequestBuilder extends ActionRequestBuilder { @@ -29,8 +30,13 @@ public class PutPipelineRequestBuilder extends ActionRequestBuilder { + /** + * Create a new builder for {@link SimulatePipelineRequest}s + */ public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action) { super(client, action, new SimulatePipelineRequest()); } + /** + * Create a new builder for {@link SimulatePipelineRequest}s + * @deprecated use {@link #SimulatePipelineRequestBuilder(ElasticsearchClient, SimulatePipelineAction, BytesReference, XContentType)} to + * avoid content type auto-detection on the source bytes + */ + @Deprecated public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action, BytesReference source) { super(client, action, new SimulatePipelineRequest(source)); } + /** + * Create a new builder for {@link SimulatePipelineRequest}s + */ + public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action, BytesReference source, + XContentType xContentType) { + super(client, action, new SimulatePipelineRequest(source, xContentType)); + } + + /** + * Set the id for the pipeline to simulate + */ public SimulatePipelineRequestBuilder setId(String id) { request.setId(id); return this; } + /** + * Enable or disable verbose mode + */ public SimulatePipelineRequestBuilder setVerbose(boolean verbose) { request.setVerbose(verbose); return this; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index 83029a1aab5..e9ea1a77507 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -30,7 +30,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -public class SimulatePipelineResponse extends ActionResponse implements ToXContent { +public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; private List results; @@ -88,11 +88,13 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startArray(Fields.DOCUMENTS); for (SimulateDocumentResult response : results) { response.toXContent(builder, params); } builder.endArray(); + builder.endObject(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 4f9a219c8ad..3f67007df69 100644 --- 
a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -28,7 +27,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -48,7 +47,7 @@ public class SimulatePipelineTransportAction extends HandledTransportAction listener) { - final Map source = XContentHelper.convertToMap(request.getSource(), false).v2(); + final Map source = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); final SimulatePipelineRequest.Parsed simulateRequest; try { diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index c978cc56d9e..3ebcb6cb6f3 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -99,10 +98,10 @@ class SimulateProcessorResult implements Writeable, ToXContent { if (failure != null && ingestDocument != null) { builder.startObject("ignored_error"); - ElasticsearchException.renderException(builder, params, failure); + ElasticsearchException.generateFailureXContent(builder, params, failure, true); builder.endObject(); } else if (failure != null) { - ElasticsearchException.renderException(builder, params, failure); + ElasticsearchException.generateFailureXContent(builder, params, failure, true); } if (ingestDocument != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 2479ff86750..32585c62384 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -25,33 +25,41 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import 
org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.Transport; +import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import java.util.function.IntConsumer; abstract class AbstractSearchAsyncAction extends AbstractAsyncAction { private static final float DEFAULT_INDEX_BOOST = 1.0f; - protected final Logger logger; protected final SearchTransportService searchTransportService; private final Executor executor; @@ -59,39 +67,41 @@ abstract class AbstractSearchAsyncAction private final GroupShardsIterator shardsIts; protected final SearchRequest request; /** Used by subclasses to resolve node ids to DiscoveryNodes. **/ - protected final Function nodeIdToDiscoveryNode; + protected final Function nodeIdToConnection; + protected final SearchPhaseController searchPhaseController; protected final SearchTask task; - protected final int expectedSuccessfulOps; + private final int expectedSuccessfulOps; private final int expectedTotalOps; - protected final AtomicInteger successfulOps = new AtomicInteger(); + private final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger totalOps = new AtomicInteger(); - protected final AtomicArray firstResults; + private final AtomicArray initialResults; private final Map aliasFilter; private final Map concreteIndexBoosts; private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); - protected volatile ScoreDoc[] sortedShardDocs; protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToDiscoveryNode, + Function nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, - Executor executor, SearchRequest request, ActionListener listener, - GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { + SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, long startTime, + long clusterStateVersion, SearchTask task) { super(startTime); this.logger = logger; + this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.executor = executor; this.request = request; this.task = task; this.listener = listener; - this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode; + this.nodeIdToConnection = nodeIdToConnection; this.clusterStateVersion = clusterStateVersion; this.shardsIts = shardsIts; expectedSuccessfulOps = shardsIts.size(); // we 
need to add 1 for non active partition, since we count it in the total! expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - firstResults = new AtomicArray<>(shardsIts.size()); + initialResults = new AtomicArray<>(shardsIts.size()); this.aliasFilter = aliasFilter; this.concreteIndexBoosts = concreteIndexBoosts; } @@ -109,45 +119,49 @@ abstract class AbstractSearchAsyncAction shardIndex++; final ShardRouting shard = shardIt.nextOrNull(); if (shard != null) { - performFirstPhase(shardIndex, shardIt, shard); + performInitialPhase(shardIndex, shardIt, shard); } else { // really, no shards active in this group - onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + onInitialPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } } } - void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) { + void performInitialPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) { if (shard == null) { + // TODO upgrade this to an assert... // no more active shards... (we should not really get here, but just for safety) - onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + onInitialPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - final DiscoveryNode node = nodeIdToDiscoveryNode.apply(shard.currentNodeId()); - if (node == null) { - onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); - } else { + try { + final Transport.Connection connection = nodeIdToConnection.apply(shard.currentNodeId()); AliasFilter filter = this.aliasFilter.get(shard.index().getUUID()); assert filter != null; float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST); ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(), filter, indexBoost, startTime()); - sendExecuteFirstPhase(node, transportRequest , new ActionListener() { - @Override - public void onResponse(FirstResult result) { - onFirstPhaseResult(shardIndex, shard.currentNodeId(), result, shardIt); - } + sendExecuteFirstPhase(connection, transportRequest, new ActionListener() { + @Override + public void onResponse(FirstResult result) { + onInitialPhaseResult(shardIndex, shard.currentNodeId(), result, shardIt); + } - @Override - public void onFailure(Exception t) { - onFirstPhaseResult(shardIndex, shard, node.getId(), shardIt, t); - } - }); + @Override + public void onFailure(Exception t) { + onInitialPhaseResult(shardIndex, shard, connection.getNode().getId(), shardIt, t); + } + }); + } catch (ConnectTransportException | IllegalArgumentException ex) { + // we are getting the connection early here so we might run into nodes that are not connected. in that case we move on to + // the next shard. previously when using discovery nodes here we had a special case for null when a node was not connected + // at all which is not not needed anymore. 
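The shard fan-out that performInitialPhase implements reduces to a small failover loop: resolve a connection, send the request, and on failure advance to the next copy of the shard. A condensed model, in which tryShardCopy, buildRequest, onShardResult, and onShardFailure are hypothetical stand-ins for the real bookkeeping:

-------------------------------------------------
// Sketch only: the real code also threads shardIndex through and maintains
// successful/total operation counters and per-shard failures.
void tryShardCopy(final ShardIterator shardIt) {
    final ShardRouting shard = shardIt.nextOrNull();
    if (shard == null) {
        onShardFailure(new NoShardAvailableActionException(shardIt.shardId()));
        return;
    }
    try {
        Transport.Connection connection = nodeIdToConnection.apply(shard.currentNodeId());
        sendExecuteFirstPhase(connection, buildRequest(shardIt), new ActionListener<FirstResult>() {
            @Override
            public void onResponse(FirstResult result) {
                onShardResult(result);
            }

            @Override
            public void onFailure(Exception e) {
                tryShardCopy(shardIt); // this copy failed: fall over to the next one
            }
        });
    } catch (ConnectTransportException | IllegalArgumentException ex) {
        tryShardCopy(shardIt); // node no longer connected: treat like any other failure
    }
}
-------------------------------------------------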
+ onInitialPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, ex); } } } - private void onFirstPhaseResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) { + private void onInitialPhaseResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) { result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId())); processFirstPhaseResult(shardIndex, result); // we need to increment successful ops first before we compare the exit condition otherwise if we @@ -158,27 +172,32 @@ abstract class AbstractSearchAsyncAction // and when that happens, we break on total ops, so we must maintain them final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1); if (xTotalOps == expectedTotalOps) { - try { - innerMoveToSecondPhase(); - } catch (Exception e) { - if (logger.isDebugEnabled()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] while moving to second phase", - shardIt.shardId(), - request), - e); - } - raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures())); - } + executePhase(initialPhaseName(), innerGetNextPhase(), null); } else if (xTotalOps > expectedTotalOps) { raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared " + "to expected [" + expectedTotalOps + "]")); } } - private void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, - final ShardIterator shardIt, Exception e) { + protected void executePhase(String phaseName, CheckedRunnable phase, Exception suppressedException) { + try { + phase.run(); + } catch (Exception e) { + if (suppressedException != null) { + e.addSuppressed(suppressedException); + } + if (logger.isDebugEnabled()) { + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "Failed to execute [{}] while moving to second phase", request), + e); + } + raiseEarlyFailure(new ReduceSearchPhaseException(phaseName, "", e, buildShardFailures())); + } + } + + private void onInitialPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, + final ShardIterator shardIt, Exception e) { // we always add the shard failure for a specific shard instance // we do make sure to clean it on a successful response from a shard SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId()); @@ -201,18 +220,13 @@ abstract class AbstractSearchAsyncAction final ShardSearchFailure[] shardSearchFailures = buildShardFailures(); if (successfulOps.get() == 0) { if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", firstPhaseName()), e); + logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", initialPhaseName()), e); } // no successful ops, raise an exception - raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", e, shardSearchFailures)); + raiseEarlyFailure(new SearchPhaseExecutionException(initialPhaseName(), "all shards failed", e, shardSearchFailures)); } else { - try { - innerMoveToSecondPhase(); - } catch (Exception inner) { - inner.addSuppressed(e); - raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", inner, shardSearchFailures)); - } + executePhase(initialPhaseName(), innerGetNextPhase(), e); } } else { final ShardRouting nextShard = shardIt.nextOrNull(); @@ -227,10 +241,10 @@ abstract class AbstractSearchAsyncAction e); if 
(!lastShard) { try { - performFirstPhase(shardIndex, shardIt, nextShard); + performInitialPhase(shardIndex, shardIt, nextShard); } catch (Exception inner) { inner.addSuppressed(e); - onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, inner); + onInitialPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, inner); } } else { // no more shards active, add a failure @@ -290,10 +304,10 @@ abstract class AbstractSearchAsyncAction } private void raiseEarlyFailure(Exception e) { - for (AtomicArray.Entry entry : firstResults.asList()) { + for (AtomicArray.Entry entry : initialResults.asList()) { try { - DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId()); - sendReleaseSearchContext(entry.value.id(), node); + Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId()); + sendReleaseSearchContext(entry.value.id(), connection); } catch (Exception inner) { inner.addSuppressed(e); logger.trace("failed to release context", inner); @@ -302,48 +316,23 @@ abstract class AbstractSearchAsyncAction listener.onFailure(e); } - /** - * Releases shard targets that are not used in the docsIdsToLoad. - */ - protected void releaseIrrelevantSearchContexts(AtomicArray queryResults, - AtomicArray docIdsToLoad) { - if (docIdsToLoad == null) { - return; - } - // we only release search context that we did not fetch from if we are not scrolling - if (request.scroll() == null) { - for (AtomicArray.Entry entry : queryResults.asList()) { - QuerySearchResult queryResult = entry.value.queryResult(); - if (queryResult.hasHits() - && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs - try { - DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId()); - sendReleaseSearchContext(entry.value.queryResult().id(), node); - } catch (Exception e) { - logger.trace("failed to release context", e); - } - } - } + protected void sendReleaseSearchContext(long contextId, Transport.Connection connection) { + if (connection != null) { + searchTransportService.sendFreeContext(connection, contextId, request); } } - protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) { - if (node != null) { - searchTransportService.sendFreeContext(node, contextId, request); - } - } - - protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry entry, + protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, int index, IntArrayList entry, ScoreDoc[] lastEmittedDocPerShard) { - final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[entry.index] : null; - return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc); + final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null; + return new ShardFetchSearchRequest(request, queryResult.id(), entry, lastEmittedDoc); } - protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, + protected abstract void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request, ActionListener listener); protected final void processFirstPhaseResult(int shardIndex, FirstResult result) { - firstResults.set(shardIndex, result); + initialResults.set(shardIndex, result); if (logger.isTraceEnabled()) { logger.trace("got first-phase result from {}", result != null ? 
result.shardTarget() : null); @@ -358,12 +347,12 @@ abstract class AbstractSearchAsyncAction } } - final void innerMoveToSecondPhase() throws Exception { + final CheckedRunnable innerGetNextPhase() { if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); boolean hadOne = false; - for (int i = 0; i < firstResults.length(); i++) { - FirstResult result = firstResults.get(i); + for (int i = 0; i < initialResults.length(); i++) { + FirstResult result = initialResults.get(i); if (result == null) { continue; // failure } @@ -377,15 +366,191 @@ abstract class AbstractSearchAsyncAction logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion); } - moveToSecondPhase(); + return getNextPhase(initialResults); } - protected abstract void moveToSecondPhase() throws Exception; + protected abstract CheckedRunnable getNextPhase(AtomicArray initialResults); - protected abstract String firstPhaseName(); + protected abstract String initialPhaseName(); protected Executor getExecutor() { return executor; } + // this is a simple base class to simplify fan out to shards and collect + final class CountedCollector { + private final AtomicArray resultArray; + private final CountDown counter; + private final IntConsumer onFinish; + + CountedCollector(AtomicArray resultArray, int expectedOps, IntConsumer onFinish) { + this.resultArray = resultArray; + this.counter = new CountDown(expectedOps); + this.onFinish = onFinish; + } + + void countDown() { + if (counter.countDown()) { + onFinish.accept(successfulOps.get()); + } + } + + void onResult(int index, R result, SearchShardTarget target) { + try { + result.shardTarget(target); + resultArray.set(index, result); + } finally { + countDown(); + } + } + + void onFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) { + try { + addShardFailure(shardIndex, shardTarget, e); + } finally { + successfulOps.decrementAndGet(); + countDown(); + } + } + + } + + /* + * At this point AbstractSearchAsyncAction is just a base-class for the first phase of a search where we have multiple replicas + * for each shardID. If one of them is not available we move to the next one. Yet, once we passed that first stage we have to work with + * the shards we succeeded on the initial phase. + * Unfortunately, subsequent phases are not fully detached from the initial phase since they are all non-static inner classes. + * In future changes this will be changed to detach the inner classes to test them in isolation and to simplify their creation. + * The AbstractSearchAsyncAction should be final and it should just get a factory for the next phase instead of requiring subclasses + * etc. 
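The CountedCollector above encodes the invariant that makes the fetch phase terminate: every expected operation counts down exactly once, whether it produced a result, was skipped, or failed, and whichever caller takes the counter to zero runs the follow-up. A simplified stand-alone model of that invariant, using a plain AtomicInteger where the real class uses the CountDown utility and the shared success counter:

-------------------------------------------------
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntConsumer;

final class CountedCollectorModel {
    private final AtomicInteger remaining;
    private final AtomicInteger successes = new AtomicInteger();
    private final IntConsumer onFinish;

    CountedCollectorModel(int expectedOps, IntConsumer onFinish) {
        this.remaining = new AtomicInteger(expectedOps);
        this.onFinish = onFinish;
    }

    void onResult() {
        successes.incrementAndGet();
        countDown();
    }

    void onFailure() {
        countDown(); // failures still count down, otherwise the phase never finishes
    }

    private void countDown() {
        if (remaining.decrementAndGet() == 0) {
            onFinish.accept(successes.get()); // exactly one caller observes zero
        }
    }
}
-------------------------------------------------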
+ */ + final class FetchPhase implements CheckedRunnable { + private final AtomicArray fetchResults; + private final SearchPhaseController searchPhaseController; + private final AtomicArray queryResults; + + FetchPhase(AtomicArray queryResults, + SearchPhaseController searchPhaseController) { + this.fetchResults = new AtomicArray<>(queryResults.length()); + this.searchPhaseController = searchPhaseController; + this.queryResults = queryResults; + } + + @Override + public void run() throws Exception { + final boolean isScrollRequest = request.scroll() != null; + ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, queryResults); + final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), sortedShardDocs); + final IntConsumer finishPhase = successOpts + -> sendResponseAsync("fetch", searchPhaseController, sortedShardDocs, queryResults, fetchResults); + if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return + queryResults.asList().stream() + .map(e -> e.value.queryResult()) + .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources + finishPhase.accept(successfulOps.get()); + } else { + final ScoreDoc[] lastEmittedDocPerShard = isScrollRequest ? + searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), sortedShardDocs, queryResults.length()) + : null; + final CountedCollector counter = new CountedCollector<>(fetchResults, + docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not + finishPhase); + for (int i = 0; i < docIdsToLoad.length; i++) { + IntArrayList entry = docIdsToLoad[i]; + QuerySearchResultProvider queryResult = queryResults.get(i); + if (entry == null) { // no results for this shard ID + if (queryResult != null) { + // if we got some hits from this shard we have to release the context there + // we do this as we go since it will free up resources and passing on the request on the + // transport layer is cheap. + releaseIrrelevantSearchContext(queryResult.queryResult()); + } + // in any case we count down this result since we don't talk to this shard anymore + counter.countDown(); + } else { + Transport.Connection connection = nodeIdToConnection.apply(queryResult.shardTarget().getNodeId()); + ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), i, entry, + lastEmittedDocPerShard); + executeFetch(i, queryResult.shardTarget(), counter, fetchSearchRequest, queryResult.queryResult(), + connection); + } + } + } + } + + private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, + final CountedCollector counter, + final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult, + final Transport.Connection connection) { + searchTransportService.sendExecuteFetch(connection, fetchSearchRequest, task, new ActionListener() { + @Override + public void onResponse(FetchSearchResult result) { + counter.onResult(shardIndex, result, shardTarget); + } + + @Override + public void onFailure(Exception e) { + try { + if (logger.isDebugEnabled()) { + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", + fetchSearchRequest.id()), e); + } + counter.onFailure(shardIndex, shardTarget, e); + } finally { + // the search context might not be cleared on the node where the fetch was executed for example + // because the action was rejected by the thread pool. 
in this case we need to send a dedicated
+                        // request to clear the search context.
+                        releaseIrrelevantSearchContext(querySearchResult);
+                    }
+                }
+            });
+        }
+
+        /**
+         * Releases the search contexts on shards that are not referenced by docIdsToLoad.
+         */
+        private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) {
+            // we only release a search context that we did not fetch from, if we are not scrolling,
+            // and if it has at least one hit that didn't make it into the global topDocs
+            if (request.scroll() == null && queryResult.hasHits()) {
+                try {
+                    Transport.Connection connection = nodeIdToConnection.apply(queryResult.shardTarget().getNodeId());
+                    sendReleaseSearchContext(queryResult.id(), connection);
+                } catch (Exception e) {
+                    logger.trace("failed to release context", e);
+                }
+            }
+        }
+    }
+
+    /**
+     * Sends back a result to the user. This method will create the sorted docs if they are null and will build the scroll ID for the
+     * response. Note: this method may send the response on a different thread, depending on the executor.
+     */
+    final void sendResponseAsync(String phase, SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,
+                                 AtomicArray queryResultsArr,
+                                 AtomicArray fetchResultsArr) {
+        getExecutor().execute(new ActionRunnable(listener) {
+            @Override
+            public void doRun() throws IOException {
+                final boolean isScrollRequest = request.scroll() != null;
+                final ScoreDoc[] theScoreDocs = sortedDocs == null ? searchPhaseController.sortDocs(isScrollRequest, queryResultsArr)
+                    : sortedDocs;
+                final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, theScoreDocs, queryResultsArr,
+                    fetchResultsArr);
+                String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), queryResultsArr) : null;
+                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
+                    buildTookInMillis(), buildShardFailures()));
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                ReduceSearchPhaseException failure = new ReduceSearchPhaseException(phase, "", e, buildShardFailures());
+                if (logger.isDebugEnabled()) {
+                    logger.debug("failed to reduce search", failure);
+                }
+                super.onFailure(failure);
+            }
+        });
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
index ff8314acce5..d1ec790d01a 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
@@ -22,7 +22,7 @@ package org.elasticsearch.action.search;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;

@@ -31,7 +31,7 @@ import java.io.IOException;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;

-public class ClearScrollResponse extends ActionResponse implements StatusToXContent {
+public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject {

     private boolean succeeded;
     private int numFreed;
@@ -66,8 +66,10 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
@Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.field(Fields.SUCCEEDED, succeeded); builder.field(Fields.NUMFREED, numFreed); + builder.endObject(); return builder; } @@ -89,5 +91,4 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont static final String SUCCEEDED = "succeeded"; static final String NUMFREED = "num_freed"; } - } diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 317b775a403..4d42ad334a9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -23,12 +23,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.Arrays; @@ -37,7 +37,7 @@ import java.util.Iterator; /** * A multi search response. */ -public class MultiSearchResponse extends ActionResponse implements Iterable, ToXContent { +public class MultiSearchResponse extends ActionResponse implements Iterable, ToXContentObject { /** * A search response item, holding the actual search response, or an error message if it failed. @@ -151,39 +151,31 @@ public class MultiSearchResponse extends ActionResponse implements Iterable connectedNodes = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Supplier nodeSupplier; + private final String clusterAlias; + private final int maxNumRemoteConnections; + private final Predicate nodePredicate; + private volatile List seedNodes; + private final ConnectHandler connectHandler; + + /** + * Creates a new {@link RemoteClusterConnection} + * @param settings the nodes settings object + * @param clusterAlias the configured alias of the cluster to connect to + * @param seedNodes a list of seed nodes to discover eligible nodes from + * @param transportService the local nodes transport service + * @param maxNumRemoteConnections the maximum number of connections to the remote cluster + * @param nodePredicate a predicate to filter eligible remote nodes to connect to + */ + RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, + TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { + super(settings); + this.transportService = transportService; + this.maxNumRemoteConnections = maxNumRemoteConnections; + this.nodePredicate = nodePredicate; + this.clusterAlias = clusterAlias; + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.setConnectTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); + builder.addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING); // TODO make this configurable? 
+ builder.addConnections(0, // we don't want this to be used for anything else but search + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.STATE, + TransportRequestOptions.Type.RECOVERY); + remoteProfile = builder.build(); + nodeSupplier = new Supplier() { + private volatile Iterator current; + @Override + public DiscoveryNode get() { + if (current == null || current.hasNext() == false) { + current = connectedNodes.iterator(); + if (current.hasNext() == false) { + throw new IllegalStateException("No node available for cluster: " + clusterAlias + " nodes: " + connectedNodes); + } + } + return current.next(); + } + }; + this.seedNodes = Collections.unmodifiableList(seedNodes); + this.connectHandler = new ConnectHandler(); + transportService.addConnectionListener(this); + } + + /** + * Updates the list of seed nodes for this cluster connection + */ + synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { + this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + connectHandler.connect(connectListener); + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + boolean remove = connectedNodes.remove(node); + if (remove && connectedNodes.size() < maxNumRemoteConnections) { + // try to reconnect and fill up the slot of the disconnected node + connectHandler.forceConnect(); + } + } + + /** + * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end. + */ + public void fetchSearchShards(SearchRequest searchRequest, final List indices, + ActionListener listener) { + if (connectedNodes.isEmpty()) { + // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener + // this will cause some back pressure on the search end and eventually will cause rejections but that's fine + // we can't proceed with a search on a cluster level. + // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the caller + // end since they provide the listener. + connectHandler.connect(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, indices, listener), listener::onFailure)); + } else { + fetchShardsInternal(searchRequest, indices, listener); + } + } + + private void fetchShardsInternal(SearchRequest searchRequest, List indices, + final ActionListener listener) { + final DiscoveryNode node = nodeSupplier.get(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices.toArray(new String[indices.size()])) + .indicesOptions(searchRequest.indicesOptions()).local(true).preference(searchRequest.preference()) + .routing(searchRequest.routing()); + transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, + new TransportResponseHandler() { + + @Override + public ClusterSearchShardsResponse newInstance() { + return new ClusterSearchShardsResponse(); + } + + @Override + public void handleResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + listener.onResponse(clusterSearchShardsResponse); + } + + @Override + public void handleException(TransportException e) { + listener.onFailure(e); + } + + @Override + public String executor() { + return ThreadPool.Names.SEARCH; + } + }); + } + + /** + * Returns a connection to the remote cluster. This connection might be a proxy connection that redirects internally to the + * given node. 
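+     * Requests sent through the returned connection are wrapped via TransportActionProxy and relayed over an
+     * already-established connection to one of the discovered gateway nodes, so no direct connection to the given
+     * node is required.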
+     */
+    Transport.Connection getConnection(DiscoveryNode remoteClusterNode) {
+        DiscoveryNode discoveryNode = nodeSupplier.get();
+        Transport.Connection connection = transportService.getConnection(discoveryNode);
+        return new Transport.Connection() {
+            @Override
+            public DiscoveryNode getNode() {
+                return remoteClusterNode;
+            }
+
+            @Override
+            public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options)
+                throws IOException, TransportException {
+                connection.sendRequest(requestId, TransportActionProxy.getProxyAction(action),
+                    TransportActionProxy.wrapRequest(remoteClusterNode, request), options);
+            }
+
+            @Override
+            public void close() throws IOException {
+                assert false : "proxy connections must not be closed";
+            }
+        };
+    }
+
+    @Override
+    public void close() throws IOException {
+        connectHandler.close();
+    }
+
+    public boolean isClosed() {
+        return connectHandler.isClosed();
+    }
+
+    /**
+     * The connect handler manages node discovery and the actual connect to the remote cluster.
+     * There is at most one connect job running at any time. If a connect job is triggered
+     * while another job is running, the provided listeners are queued and batched up until the currently running job returns.
+     *
+     * The handler has a built-in queue that can hold up to 100 connect attempts; it rejects requests once the queue is full.
+     * In a scenario where a remote cluster becomes unavailable we will queue requests up, but if we can't connect quickly enough
+     * we will just reject the connect trigger, which will lead to failing searches.
+     */
+    private class ConnectHandler implements Closeable {
+        private final Semaphore running = new Semaphore(1);
+        private final AtomicBoolean closed = new AtomicBoolean(false);
+        private final BlockingQueue<ActionListener<Void>> queue = new ArrayBlockingQueue<>(100);
+        private final CancellableThreads cancellableThreads = new CancellableThreads();
+
+        /**
+         * Triggers a connect round iff there are pending requests queued up and if there is no
+         * connect round currently running.
+         */
+        void maybeConnect() {
+            connect(null);
+        }
+
+        /**
+         * Triggers a connect round unless there is one running already. If there is a connect round running, the listener will either
+         * be queued or rejected and failed.
+         */
+        void connect(ActionListener<Void> connectListener) {
+            connect(connectListener, false);
+        }
+
+        /**
+         * Triggers a connect round unless there is one already running. In contrast to {@link #maybeConnect()}, this method will also
+         * trigger a connect round if there is no listener queued up.
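+         * For example, this is used to refill the connection slot after a remote node disconnects, even when no listener is waiting.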
+ */ + void forceConnect() { + connect(null, true); + } + + private void connect(ActionListener connectListener, boolean forceRun) { + final boolean runConnect; + final Collection> toNotify; + synchronized (queue) { + if (connectListener != null && queue.offer(connectListener) == false) { + connectListener.onFailure(new RejectedExecutionException("connect queue is full")); + return; + } + if (forceRun == false && queue.isEmpty()) { + return; + } + runConnect = running.tryAcquire(); + if (runConnect) { + toNotify = new ArrayList<>(); + queue.drainTo(toNotify); + if (closed.get()) { + running.release(); + ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); + return; + } + } else { + toNotify = Collections.emptyList(); + } + } + if (runConnect) { + forkConnect(toNotify); + } + } + + private void forkConnect(final Collection> toNotify) { + ThreadPool threadPool = transportService.getThreadPool(); + ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); + executor.submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onFailure(toNotify, e); + } finally { + maybeConnect(); + } + } + + @Override + protected void doRun() throws Exception { + ActionListener listener = ActionListener.wrap((x) -> { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onResponse(toNotify, x); + } finally { + maybeConnect(); + } + + }, (e) -> { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onFailure(toNotify, e); + } finally { + maybeConnect(); + } + }); + collectRemoteNodes(seedNodes.iterator(), transportService, listener); + } + }); + + } + + void collectRemoteNodes(Iterator seedNodes, + final TransportService transportService, ActionListener listener) { + if (Thread.currentThread().isInterrupted()) { + listener.onFailure(new InterruptedException("remote connect thread got interrupted")); + } + try { + if (seedNodes.hasNext()) { + cancellableThreads.executeIO(() -> { + final DiscoveryNode seedNode = seedNodes.next(); + final DiscoveryNode handshakeNode; + Transport.Connection connection = transportService.openConnection(seedNode, + ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); + boolean success = false; + try { + handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), + (c) -> true); + if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { + transportService.connectToNode(handshakeNode, remoteProfile); + connectedNodes.add(handshakeNode); + } + ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.nodes(true); + // here we pass on the connection since we can only close it once the sendRequest returns otherwise + // due to the async nature (it will return before it's actually sent) this can cause the request to fail + // due to an already closed connection. 
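+                            // (the sniff response handler below takes ownership of the connection and closes it when done)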
+                            transportService.sendRequest(connection,
+                                ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY,
+                                new SniffClusterStateResponseHandler(transportService, connection, listener, seedNodes,
+                                    cancellableThreads));
+                            success = true;
+                        } finally {
+                            if (success == false) {
+                                connection.close();
+                            }
+                        }
+                    });
+                } else {
+                    listener.onFailure(new IllegalStateException("no seed node left"));
+                }
+            } catch (CancellableThreads.ExecutionCancelledException ex) {
+                listener.onFailure(ex); // we got canceled - fail the listener and step out
+            } catch (ConnectTransportException | IOException | IllegalStateException ex) {
+                // ISE if we fail the handshake with a version-incompatible node
+                if (seedNodes.hasNext()) {
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed",
+                        clusterAlias), ex);
+                    collectRemoteNodes(seedNodes, transportService, listener);
+                } else {
+                    listener.onFailure(ex);
+                }
+            }
+        }
+
+        @Override
+        public void close() throws IOException {
+            try {
+                if (closed.compareAndSet(false, true)) {
+                    cancellableThreads.cancel("connect handler is closed");
+                    running.acquire(); // acquire the semaphore to ensure all connections are closed and all threads have joined
+                    running.release();
+                    maybeConnect(); // now go and notify pending listeners
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+
+        final boolean isClosed() {
+            return closed.get();
+        }
+
+        /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */
+        private class SniffClusterStateResponseHandler implements TransportResponseHandler<ClusterStateResponse> {
+
+            private final TransportService transportService;
+            private final Transport.Connection connection;
+            private final ActionListener<Void> listener;
+            private final Iterator<DiscoveryNode> seedNodes;
+            private final CancellableThreads cancellableThreads;
+
+            SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection,
+                                             ActionListener<Void> listener, Iterator<DiscoveryNode> seedNodes,
+                                             CancellableThreads cancellableThreads) {
+                this.transportService = transportService;
+                this.connection = connection;
+                this.listener = listener;
+                this.seedNodes = seedNodes;
+                this.cancellableThreads = cancellableThreads;
+            }
+
+            @Override
+            public ClusterStateResponse newInstance() {
+                return new ClusterStateResponse();
+            }
+
+            @Override
+            public void handleResponse(ClusterStateResponse response) {
+                try {
+                    try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes
+                        // we have to close this connection before we notify listeners - this is mainly needed for test correctness
+                        // since if we do it afterwards we might fail assertions that check if all high level connections are closed.
+                        // from a code correctness perspective we could also close it afterwards. This try/with block will
+                        // preserve any exceptions thrown from within the try block and suppress the ones possibly thrown
+                        // by closing the connection
+                        cancellableThreads.executeIO(() -> {
+                            DiscoveryNodes nodes = response.getState().nodes();
+                            Iterable<DiscoveryNode> nodesIter = nodes.getNodes()::valuesIt;
+                            for (DiscoveryNode node : nodesIter) {
+                                if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) {
+                                    try {
+                                        transportService.connectToNode(node, remoteProfile); // noop if node is connected
+                                        connectedNodes.add(node);
+                                    } catch (ConnectTransportException | IllegalStateException ex) {
+                                        // ISE if we fail the handshake with a version-incompatible node -
+                                        // fair enough, we can't connect, just move on
+                                        logger.debug((Supplier<?>)
+                                            () -> new ParameterizedMessage("failed to connect to node {}", node), ex);
+                                    }
+                                }
+                            }
+                        });
+                    }
+                    listener.onResponse(null);
+                } catch (CancellableThreads.ExecutionCancelledException ex) {
+                    listener.onFailure(ex); // we got canceled - fail the listener and step out
+                } catch (Exception ex) {
+                    logger.warn((Supplier<?>)
+                        () -> new ParameterizedMessage("fetching nodes from external cluster {} failed",
+                            clusterAlias), ex);
+                    collectRemoteNodes(seedNodes, transportService, listener);
+                }
+            }
+
+            @Override
+            public void handleException(TransportException exp) {
+                logger.warn((Supplier<?>)
+                    () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias),
+                    exp);
+                try {
+                    IOUtils.closeWhileHandlingException(connection);
+                } finally {
+                    // once the connection is closed, let's try the next node
+                    collectRemoteNodes(seedNodes, transportService, listener);
+                }
+            }
+
+            @Override
+            public String executor() {
+                return ThreadPool.Names.MANAGEMENT;
+            }
+        }
+    }
+
+    boolean assertNoRunningConnections() { // for testing only
+        assert connectHandler.running.availablePermits() == 1;
+        return true;
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java
new file mode 100644
index 00000000000..92b4ca5f0b3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.action.search; + +import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportService; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Basic service for accessing remote clusters via gateway nodes + */ +public final class RemoteClusterService extends AbstractComponent implements Closeable { + + static final String LOCAL_CLUSTER_GROUP_KEY = ""; + + /** + * A list of initial seed nodes to discover eligible nodes from the remote cluster + */ + public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.", + "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterService::parseSeedAddress, + Setting.Property.NodeScope, Setting.Property.Dynamic)); + /** + * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single + * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. + */ + public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", + 3, 1, Setting.Property.NodeScope); + + /** + * The initial connect timeout for remote cluster connections + */ + public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("search.remote.initial_connect_timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); + + /** + * The name of a node attribute to select nodes that should be connected to in the remote cluster. + * For instance a node can be configured with node.attr.gateway: true in order to be eligible as a gateway node between + * clusters. 
In that case search.remote.node.attr: gateway can be used to filter out other nodes in the remote cluster.
+     * The value of the attribute is expected to be a boolean, true for nodes that can become gateways, false otherwise.
+     */
+    public static final Setting<String> REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr",
+        Setting.Property.NodeScope);
+
+    private static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
+
+    private final TransportService transportService;
+    private final int numRemoteConnections;
+    private volatile Map<String, RemoteClusterConnection> remoteClusters = Collections.emptyMap();
+
+    RemoteClusterService(Settings settings, TransportService transportService) {
+        super(settings);
+        this.transportService = transportService;
+        numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings);
+    }
+
+    /**
+     * This method updates the list of remote clusters. It's intended to be used as an update consumer on the settings infrastructure.
+     * @param seeds a cluster alias to discovery-node mapping representing the remote clusters' seed nodes
+     * @param connectionListener a listener invoked once every configured cluster has been connected to
+     */
+    private synchronized void updateRemoteClusters(Map<String, List<DiscoveryNode>> seeds, ActionListener<Void> connectionListener) {
+        if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) {
+            throw new IllegalArgumentException("remote clusters must not have the empty string as their key");
+        }
+        Map<String, RemoteClusterConnection> remoteClusters = new HashMap<>();
+        if (seeds.isEmpty()) {
+            connectionListener.onResponse(null);
+        } else {
+            CountDown countDown = new CountDown(seeds.size());
+            Predicate<DiscoveryNode> nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion());
+            if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
+                // nodes can be tagged with node.attr.gateway: true to allow a node to be a gateway node for
+                // cross cluster search
+                String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
+                nodePredicate = nodePredicate.and((node) -> Boolean.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
+            }
+            remoteClusters.putAll(this.remoteClusters);
+            for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
+                RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
+                if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection
+                    try {
+                        IOUtils.close(remote);
+                    } catch (IOException e) {
+                        logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e);
+                    }
+                    remoteClusters.remove(entry.getKey());
+                    continue;
+                }
+
+                if (remote == null) { // this is a new cluster we have to add a new representation
+                    remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService,
+                        numRemoteConnections, nodePredicate);
+                    remoteClusters.put(entry.getKey(), remote);
+                }
+
+                // now update the seed nodes no matter if it's new or already existing
+                RemoteClusterConnection finalRemote = remote;
+                remote.updateSeedNodes(entry.getValue(), ActionListener.wrap(
+                    response -> {
+                        if (countDown.countDown()) {
+                            connectionListener.onResponse(response);
+                        }
+                    },
+                    exception -> {
+                        if (countDown.fastForward()) {
+                            connectionListener.onFailure(exception);
+                        }
+                        if (finalRemote.isClosed() == false) {
+                            logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception);
+                        }
+                    }));
+            }
+        }
+        this.remoteClusters = Collections.unmodifiableMap(remoteClusters);
+    }
+
+    /**
+     * Returns true if at least one remote cluster is configured
+     */
+    boolean isCrossClusterSearchEnabled() {
+        return remoteClusters.isEmpty() == false;
+    }
+
+    /**
+     *
Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All + * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under + * {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable. + * + * @param requestIndices the indices in the search request to filter + * @param indexExists a predicate that can test if a certain index or alias exists + * + * @return a map of grouped remote and local indices + */ + Map> groupClusterIndices(String[] requestIndices, Predicate indexExists) { + Map> perClusterIndices = new HashMap<>(); + for (String index : requestIndices) { + int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR); + String indexName = index; + String clusterName = LOCAL_CLUSTER_GROUP_KEY; + if (i >= 0) { + String remoteClusterName = index.substring(0, i); + if (isRemoteClusterRegistered(remoteClusterName)) { + if (indexExists.test(index)) { + // we use : as a separator for remote clusters. might conflict if there is an index that is actually named + // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias + // if that happens + throw new IllegalArgumentException("Can not filter indices; index " + index + + " exists but there is also a remote cluster named: " + remoteClusterName); + } + indexName = index.substring(i + 1); + clusterName = remoteClusterName; + } + } + perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList()).add(indexName); + } + return perClusterIndices; +} + + /** + * Returns true iff the given cluster is configured as a remote cluster. Otherwise false + */ + boolean isRemoteClusterRegistered(String clusterName) { + return remoteClusters.containsKey(clusterName); + } + + void collectSearchShards(SearchRequest searchRequest, Map> remoteIndicesByCluster, + ActionListener> listener) { + final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); + final Map searchShardsResponses = new ConcurrentHashMap<>(); + final AtomicReference transportException = new AtomicReference<>(); + for (Map.Entry> entry : remoteIndicesByCluster.entrySet()) { + final String clusterName = entry.getKey(); + RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName); + if (remoteClusterConnection == null) { + throw new IllegalArgumentException("no such remote cluster: " + clusterName); + } + final List indices = entry.getValue(); + remoteClusterConnection.fetchSearchShards(searchRequest, indices, + new ActionListener() { + @Override + public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + searchShardsResponses.put(clusterName, clusterSearchShardsResponse); + if (responsesCountDown.countDown()) { + TransportException exception = transportException.get(); + if (exception == null) { + listener.onResponse(searchShardsResponses); + } else { + listener.onFailure(transportException.get()); + } + } + } + + @Override + public void onFailure(Exception e) { + TransportException exception = new TransportException("unable to communicate with remote cluster [" + + clusterName + "]", e); + if (transportException.compareAndSet(null, exception) == false) { + exception = transportException.accumulateAndGet(exception, (previous, current) -> { + current.addSuppressed(previous); + return current; + }); + } + if (responsesCountDown.countDown()) { + listener.onFailure(exception); + } + } + }); + } + } + + + Function processRemoteShards(Map 
searchShardsResponses, + List remoteShardIterators, + Map aliasFilterMap) { + Map> nodeToCluster = new HashMap<>(); + for (Map.Entry entry : searchShardsResponses.entrySet()) { + String clusterName = entry.getKey(); + ClusterSearchShardsResponse searchShardsResponse = entry.getValue(); + for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) { + nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterName)); + } + Map indicesAndFilters = searchShardsResponse.getIndicesAndFilters(); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + //add the cluster name to the remote index names for indices disambiguation + //this ends up in the hits returned with the search response + ShardId shardId = clusterSearchShardsGroup.getShardId(); + Index remoteIndex = shardId.getIndex(); + Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID()); + ShardIterator shardIterator = new PlainShardIterator(new ShardId(index, shardId.getId()), + Arrays.asList(clusterSearchShardsGroup.getShards())); + remoteShardIterators.add(shardIterator); + AliasFilter aliasFilter; + if (indicesAndFilters == null) { + aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY); + } else { + aliasFilter = indicesAndFilters.get(shardId.getIndexName()); + assert aliasFilter != null; + } + // here we have to map the filters to the UUID since from now on we use the uuid for the lookup + aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter); + } + } + return (nodeId) -> { + Supplier supplier = nodeToCluster.get(nodeId); + if (supplier == null) { + throw new IllegalArgumentException("unknown remote node: " + nodeId); + } + return supplier.get(); + }; + } + + /** + * Returns a connection to the given node on the given remote cluster + * @throws IllegalArgumentException if the remote cluster is unknown + */ + private Transport.Connection getConnection(DiscoveryNode node, String cluster) { + RemoteClusterConnection connection = remoteClusters.get(cluster); + if (connection == null) { + throw new IllegalArgumentException("no such remote cluster: " + cluster); + } + return connection.getConnection(node); + } + + void updateRemoteCluster(String clusterAlias, List addresses) { + updateRemoteClusters(Collections.singletonMap(clusterAlias, addresses.stream().map(address -> { + TransportAddress transportAddress = new TransportAddress(address); + return new DiscoveryNode(clusterAlias + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion()); + }).collect(Collectors.toList())), + ActionListener.wrap((x) -> {}, (x) -> {}) ); + } + + static Map> buildRemoteClustersSeeds(Settings settings) { + Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); + return allConcreteSettings.collect( + Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { + String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); + List nodes = new ArrayList<>(); + for (InetSocketAddress address : concreteSetting.get(settings)) { + TransportAddress transportAddress = new TransportAddress(address); + DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion()); + nodes.add(node); + } + return nodes; + })); + } + + private static InetSocketAddress parseSeedAddress(String remoteHost) { + int portSeparator = remoteHost.lastIndexOf(':'); // in case 
we have an IPv6 address, e.g. [::1]:9300
+        if (portSeparator == -1 || portSeparator == remoteHost.length() - 1) {
+            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
+        }
+        String host = remoteHost.substring(0, portSeparator);
+        InetAddress hostAddress;
+        try {
+            hostAddress = InetAddress.getByName(host);
+        } catch (UnknownHostException e) {
+            throw new IllegalArgumentException("unknown host [" + host + "]", e);
+        }
+        try {
+            int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
+            if (port <= 0) {
+                throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
+            }
+            return new InetSocketAddress(hostAddress, port);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException("port must be a number", e);
+        }
+    }
+
+    /**
+     * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection
+     * to all configured seed nodes.
+     */
+    void initializeRemoteClusters() {
+        final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
+        final PlainActionFuture<Void> future = new PlainActionFuture<>();
+        Map<String, List<DiscoveryNode>> seeds = buildRemoteClustersSeeds(settings);
+        updateRemoteClusters(seeds, future);
+        try {
+            future.get(timeValue.millis(), TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        } catch (TimeoutException ex) {
+            logger.warn("failed to connect to remote clusters within {}", timeValue.toString());
+        } catch (Exception e) {
+            throw new IllegalStateException("failed to connect to remote clusters", e);
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        IOUtils.close(remoteClusters.values());
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java b/core/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java
index 488132fdda2..76d4ac11413 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java
@@ -23,7 +23,7 @@ class ScrollIdForNode {
     private final String node;
     private final long scrollId;

-    public ScrollIdForNode(String node, long scrollId) {
+    ScrollIdForNode(String node, long scrollId) {
         this.node = node;
         this.scrollId = scrollId;
     }
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
deleted file mode 100644
index 9db3a21c485..00000000000
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.action.search; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.dfs.AggregatedDfs; -import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.QueryFetchSearchResult; -import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.ShardSearchTransportRequest; -import org.elasticsearch.search.query.QuerySearchRequest; - -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; - -class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { - - private final AtomicArray queryFetchResults; - private final SearchPhaseController searchPhaseController; - SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToDiscoveryNode, - Map aliasFilter, Map concreteIndexBoosts, - SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, - ActionListener listener, GroupShardsIterator shardsIts, - long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, - request, listener, shardsIts, startTime, clusterStateVersion, task); - this.searchPhaseController = searchPhaseController; - queryFetchResults = new AtomicArray<>(firstResults.length()); - } - - @Override - protected String firstPhaseName() { - return "dfs"; - } - - @Override - protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, - ActionListener listener) { - searchTransportService.sendExecuteDfs(node, request, task, listener); - } - - @Override - protected void moveToSecondPhase() { - final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults); - final AtomicInteger counter = new AtomicInteger(firstResults.asList().size()); - - for (final AtomicArray.Entry entry : firstResults.asList()) { - DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); - QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); - executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest); - } - } - - void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, - final DiscoveryNode node, final QuerySearchRequest querySearchRequest) { - searchTransportService.sendExecuteFetch(node, querySearchRequest, task, new ActionListener() { - @Override - public void onResponse(QueryFetchSearchResult result) { - result.shardTarget(dfsResult.shardTarget()); - queryFetchResults.set(shardIndex, result); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - @Override - public void onFailure(Exception t) { - try { - onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter); - } finally { - // the query might not have been executed at all (for 
example because thread pool rejected execution) - // and the search context that was created in dfs phase might not be released. - // release it again to be in the safe side - sendReleaseSearchContext(querySearchRequest.id(), node); - } - } - }); - } - - void onSecondPhaseFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, - AtomicInteger counter) { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e); - } - this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); - successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - private void finishHim() { - getExecutor().execute(new ActionRunnable(listener) { - @Override - public void doRun() throws IOException { - sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults, - queryFetchResults); - String scrollId = null; - if (request.scroll() != null) { - scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults); - } - listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), - buildTookInMillis(), buildShardFailures())); - } - - @Override - public void onFailure(Exception e) { - ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", e, buildShardFailures()); - if (logger.isDebugEnabled()) { - logger.debug("failed to reduce search", failure); - } - super.onFailure(e); - } - }); - - } -} diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 3fe24cc9911..9cc686507f5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -19,209 +19,109 @@ package org.elasticsearch.action.search; -import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.Map; import java.util.concurrent.Executor; -import 
java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; -class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { - - final AtomicArray queryResults; - final AtomicArray fetchResults; - final AtomicArray docIdsToLoad; - private final SearchPhaseController searchPhaseController; +final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToDiscoveryNode, + Function nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, + super(logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); - this.searchPhaseController = searchPhaseController; - queryResults = new AtomicArray<>(firstResults.length()); - fetchResults = new AtomicArray<>(firstResults.length()); - docIdsToLoad = new AtomicArray<>(firstResults.length()); } @Override - protected String firstPhaseName() { + protected String initialPhaseName() { return "dfs"; } @Override - protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, + protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request, ActionListener listener) { - searchTransportService.sendExecuteDfs(node, request, task, listener); + searchTransportService.sendExecuteDfs(connection, request, task, listener); } @Override - protected void moveToSecondPhase() { - final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults); - final AtomicInteger counter = new AtomicInteger(firstResults.asList().size()); - for (final AtomicArray.Entry entry : firstResults.asList()) { - DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); - QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); - executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); - } + protected CheckedRunnable getNextPhase(AtomicArray initialResults) { + return new DfsQueryPhase(initialResults, searchPhaseController, + (queryResults) -> new FetchPhase(queryResults, searchPhaseController)); } - void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, - final QuerySearchRequest querySearchRequest, final DiscoveryNode node) { - searchTransportService.sendExecuteQuery(node, querySearchRequest, task, new ActionListener() { - @Override - public void onResponse(QuerySearchResult result) { - result.shardTarget(dfsResult.shardTarget()); - queryResults.set(shardIndex, result); - if (counter.decrementAndGet() == 0) { - executeFetchPhase(); - } - } + private final class DfsQueryPhase implements CheckedRunnable { + private final AtomicArray queryResult; + private final SearchPhaseController searchPhaseController; + private final AtomicArray firstResults; + private final Function, CheckedRunnable> nextPhaseFactory; - @Override - public void onFailure(Exception t) { - try { - onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter); - } finally { - 
// the query might not have been executed at all (for example because thread pool rejected - // execution) and the search context that was created in dfs phase might not be released. - // release it again to be in the safe side - sendReleaseSearchContext(querySearchRequest.id(), node); - } - } - }); - } - - void onQueryFailure(Exception e, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, - AtomicInteger counter) { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", querySearchRequest.id()), e); - } - this.addShardFailure(shardIndex, dfsResult.shardTarget(), e); - successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { - if (successfulOps.get() == 0) { - listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures())); - } else { - executeFetchPhase(); - } - } - } - - void executeFetchPhase() { - try { - innerExecuteFetchPhase(); - } catch (Exception e) { - listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures())); - } - } - - void innerExecuteFetchPhase() throws Exception { - final boolean isScrollRequest = request.scroll() != null; - sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, queryResults); - searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs); - - if (docIdsToLoad.asList().isEmpty()) { - finishHim(); - return; + DfsQueryPhase(AtomicArray firstResults, + SearchPhaseController searchPhaseController, + Function, CheckedRunnable> nextPhaseFactory) { + this.queryResult = new AtomicArray<>(firstResults.length()); + this.searchPhaseController = searchPhaseController; + this.firstResults = firstResults; + this.nextPhaseFactory = nextPhaseFactory; } - final ScoreDoc[] lastEmittedDocPerShard = (request.scroll() != null) ? - searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), sortedShardDocs, firstResults.length()) : null; - final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size()); - for (final AtomicArray.Entry entry : docIdsToLoad.asList()) { - QuerySearchResult queryResult = queryResults.get(entry.index); - DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard); - executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); - } - } - - void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, - final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) { - searchTransportService.sendExecuteFetch(node, fetchSearchRequest, task, new ActionListener() { - @Override - public void onResponse(FetchSearchResult result) { - result.shardTarget(shardTarget); - fetchResults.set(shardIndex, result); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - @Override - public void onFailure(Exception t) { - // the search context might not be cleared on the node where the fetch was executed for example - // because the action was rejected by the thread pool. in this case we need to send a dedicated - // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared - // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done. 
- docIdsToLoad.set(shardIndex, null); - onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter); - } - }); - } - - void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, - SearchShardTarget shardTarget, AtomicInteger counter) { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); - } - this.addShardFailure(shardIndex, shardTarget, e); - successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - private void finishHim() { - getExecutor().execute(new ActionRunnable(listener) { - @Override - public void doRun() throws IOException { - final boolean isScrollRequest = request.scroll() != null; - final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, queryResults, - fetchResults); - String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; - listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), - buildTookInMillis(), buildShardFailures())); - releaseIrrelevantSearchContexts(queryResults, docIdsToLoad); - } - - @Override - public void onFailure(Exception e) { - try { - ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", e, buildShardFailures()); - if (logger.isDebugEnabled()) { - logger.debug("failed to reduce search", failure); + @Override + public void run() throws Exception { + final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults); + final CountedCollector counter = new CountedCollector<>(queryResult, firstResults.asList().size(), + (successfulOps) -> { + if (successfulOps == 0) { + listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures())); + } else { + executePhase("fetch", this.nextPhaseFactory.apply(queryResult), null); } - super.onFailure(failure); - } finally { - releaseIrrelevantSearchContexts(queryResults, docIdsToLoad); - } + }); + for (final AtomicArray.Entry entry : firstResults.asList()) { + DfsSearchResult dfsResult = entry.value; + final int shardIndex = entry.index; + Transport.Connection connection = nodeIdToConnection.apply(dfsResult.shardTarget().getNodeId()); + QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); + searchTransportService.sendExecuteQuery(connection, querySearchRequest, task, new ActionListener() { + @Override + public void onResponse(QuerySearchResult result) { + counter.onResult(shardIndex, result, dfsResult.shardTarget()); + } + + @Override + public void onFailure(Exception e) { + try { + if (logger.isDebugEnabled()) { + logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", + querySearchRequest.id()), e); + } + counter.onFailure(shardIndex, dfsResult.shardTarget(), e); + } finally { + // the query might not have been executed at all (for example because thread pool rejected + // execution) and the search context that was created in dfs phase might not be released. 
+ // release it again to be on the safe side + sendReleaseSearchContext(querySearchRequest.id(), connection); + } + } + }); } - }); + } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 92270c6fe36..f59d7fe50db 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -30,11 +30,16 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.IntsRef; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -63,17 +68,19 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; public class SearchPhaseController extends AbstractComponent { private static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = (o1, o2) -> { - int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); + int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex()); if (i == 0) { - i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); + i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id(); } return i; }; @@ -82,11 +89,25 @@ public class SearchPhaseController extends AbstractComponent { private final BigArrays bigArrays; private final ScriptService scriptService; + private final List<BiConsumer<SearchRequest, SearchResponse>> searchResponseListener; public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) { + this(settings, bigArrays, scriptService, Collections.emptyList()); + } + + public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, + List<BiConsumer<SearchRequest, SearchResponse>> searchResponseListener) { super(settings); this.bigArrays = bigArrays; this.scriptService = scriptService; + this.searchResponseListener = searchResponseListener; + } + + /** + * Returns the search response listeners registry + */ + public List<BiConsumer<SearchRequest, SearchResponse>> getSearchResponseListener() { + return searchResponseListener; } public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) { @@ -236,7 +257,26 @@ public class SearchPhaseController extends AbstractComponent { } final TopDocs mergedTopDocs; - if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) { + int numShards = resultsArr.length(); + if (firstResult.queryResult().topDocs() instanceof CollapseTopFieldDocs) { + CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) firstResult.queryResult().topDocs(); + final Sort sort = new Sort(firstTopDocs.fields); + + final
CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards]; + for (AtomicArray.Entry sortedResult : sortedResults) { + TopDocs topDocs = sortedResult.value.queryResult().topDocs(); + // the 'index' field is the position in the resultsArr atomic array + shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs; + } + // TopDocs#merge can't deal with null shard TopDocs + for (int i = 0; i < shardTopDocs.length; ++i) { + if (shardTopDocs[i] == null) { + shardTopDocs[i] = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], + sort.getSort(), new Object[0], Float.NaN); + } + } + mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs); + } else if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) { TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs(); final Sort sort = new Sort(firstTopDocs.fields); @@ -316,6 +356,8 @@ public class SearchPhaseController extends AbstractComponent { } // from is always zero as when we use scroll, we ignore from long size = Math.min(fetchHits, topN(queryResults)); + // with collapsing we can have more hits than sorted docs + size = Math.min(sortedScoreDocs.length, size); for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) { ScoreDoc scoreDoc = sortedScoreDocs[sortedDocsIndex]; lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc; @@ -328,15 +370,16 @@ public class SearchPhaseController extends AbstractComponent { /** * Builds an array, with potential null elements, with docs to load. */ - public void fillDocIdsToLoad(AtomicArray docIdsToLoad, ScoreDoc[] shardDocs) { + public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { + IntArrayList[] docIdsToLoad = new IntArrayList[numShards]; for (ScoreDoc shardDoc : shardDocs) { - IntArrayList shardDocIdsToLoad = docIdsToLoad.get(shardDoc.shardIndex); + IntArrayList shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex]; if (shardDocIdsToLoad == null) { - shardDocIdsToLoad = new IntArrayList(); // can't be shared!, uses unsafe on it later on - docIdsToLoad.set(shardDoc.shardIndex, shardDocIdsToLoad); + shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex] = new IntArrayList(); } shardDocIdsToLoad.add(shardDoc.doc); } + return docIdsToLoad; } /** @@ -362,11 +405,16 @@ public class SearchPhaseController extends AbstractComponent { boolean sorted = false; int sortScoreIndex = -1; if (firstResult.topDocs() instanceof TopFieldDocs) { - sorted = true; TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs(); - for (int i = 0; i < fieldDocs.fields.length; i++) { - if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) { - sortScoreIndex = i; + if (fieldDocs instanceof CollapseTopFieldDocs && + fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) { + sorted = false; + } else { + sorted = true; + for (int i = 0; i < fieldDocs.fields.length; i++) { + if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) { + sortScoreIndex = i; + } } } } @@ -405,6 +453,8 @@ public class SearchPhaseController extends AbstractComponent { } int from = ignoreFrom ? 
0 : firstResult.queryResult().from(); int numSearchHits = (int) Math.min(fetchHits - from, topN(queryResults)); + // with collapsing we can have more fetch hits than sorted docs + numSearchHits = Math.min(sortedDocs.length, numSearchHits); // merge hits List<InternalSearchHit> hits = new ArrayList<>(); if (!fetchResults.isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 515d3204fb6..c6e0b21dffd 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -103,6 +103,7 @@ public class SearchPhaseExecutionException extends ElasticsearchException { return shardFailures; } + @Override public Throwable getCause() { Throwable cause = super.getCause(); if (cause == null) { @@ -131,28 +132,34 @@ public class SearchPhaseExecutionException extends ElasticsearchException { } @Override - protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("phase", phaseName); final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default builder.field("grouped", group); // notify that it's grouped builder.field("failed_shards"); builder.startArray(); - ShardOperationFailedException[] failures = params.paramAsBoolean("group_shard_failures", true) ? ExceptionsHelper.groupBy(shardFailures) : shardFailures; + ShardOperationFailedException[] failures = params.paramAsBoolean("group_shard_failures", true) ? + ExceptionsHelper.groupBy(shardFailures) : shardFailures; for (ShardOperationFailedException failure : failures) { builder.startObject(); failure.toXContent(builder, params); builder.endObject(); } builder.endArray(); - super.innerToXContent(builder, params); } @Override - protected void causeToXContent(XContentBuilder builder, Params params) throws IOException { - if (super.getCause() != null) { - // if the cause is null we inject a guessed root cause that will then be rendered twice so wi disable it manually - super.causeToXContent(builder, params); + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Throwable ex = ExceptionsHelper.unwrapCause(this); + if (ex != this) { + generateThrowableXContent(builder, params, this); + } else { + // We don't have a cause when all shards failed, but we do have shard failures so we can "guess" a cause + // (see {@link #getCause()}).
Here, we use super.getCause() because we don't want the guessed exception to + // be rendered twice (one in the "cause" field, one in "failed_shards") + innerToXContent(builder, params, this, getExceptionName(), getMessage(), getHeaders(), getMetadata(), super.getCause()); } + return builder; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index f597ede64bc..34779684132 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -21,69 +21,44 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; +import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.Map; import java.util.concurrent.Executor; import java.util.function.Function; -class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { - - private final SearchPhaseController searchPhaseController; +final class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToDiscoveryNode, + Function nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, + super(logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); - this.searchPhaseController = searchPhaseController; - } @Override - protected String firstPhaseName() { + protected String initialPhaseName() { return "query_fetch"; } @Override - protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, + protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request, ActionListener listener) { - searchTransportService.sendExecuteFetch(node, request, task, listener); + searchTransportService.sendExecuteFetch(connection, request, task, listener); } @Override - protected void moveToSecondPhase() throws Exception { - getExecutor().execute(new ActionRunnable(listener) { - @Override - public void doRun() throws IOException { - final boolean isScrollRequest = request.scroll() != null; - sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, firstResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults, - firstResults); - String scrollId = 
isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; - listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), - buildTookInMillis(), buildShardFailures())); - } - - @Override - public void onFailure(Exception e) { - ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", e, buildShardFailures()); - if (logger.isDebugEnabled()) { - logger.debug("failed to reduce search", failure); - } - super.onFailure(failure); - } - }); + protected CheckedRunnable getNextPhase(AtomicArray initialResults) { + return () -> sendResponseAsync("fetch", searchPhaseController, null, initialResults, initialResults); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 7b300063291..a219f183398 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -19,144 +19,46 @@ package org.elasticsearch.action.search; -import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.Map; import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; -class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { - - final AtomicArray fetchResults; - final AtomicArray docIdsToLoad; - private final SearchPhaseController searchPhaseController; +final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToDiscoveryNode, + Function nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener, - shardsIts, startTime, clusterStateVersion, task); - this.searchPhaseController = searchPhaseController; - fetchResults = new AtomicArray<>(firstResults.length()); - docIdsToLoad = new AtomicArray<>(firstResults.length()); + super(logger, searchTransportService, nodeIdToConnection, aliasFilter, 
concreteIndexBoosts, searchPhaseController, executor, + request, listener, shardsIts, startTime, clusterStateVersion, task); } @Override - protected String firstPhaseName() { + protected String initialPhaseName() { return "query"; } @Override - protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, + protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request, ActionListener listener) { - searchTransportService.sendExecuteQuery(node, request, task, listener); + searchTransportService.sendExecuteQuery(connection, request, task, listener); } @Override - protected void moveToSecondPhase() throws Exception { - final boolean isScrollRequest = request.scroll() != null; - sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, firstResults); - searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs); - - if (docIdsToLoad.asList().isEmpty()) { - finishHim(); - return; - } - - final ScoreDoc[] lastEmittedDocPerShard = isScrollRequest ? - searchPhaseController.getLastEmittedDocPerShard(firstResults.asList(), sortedShardDocs, firstResults.length()) : null; - final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size()); - for (AtomicArray.Entry entry : docIdsToLoad.asList()) { - QuerySearchResultProvider queryResult = firstResults.get(entry.index); - DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard); - executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); - } - } - - void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, - final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) { - searchTransportService.sendExecuteFetch(node, fetchSearchRequest, task, new ActionListener() { - @Override - public void onResponse(FetchSearchResult result) { - result.shardTarget(shardTarget); - fetchResults.set(shardIndex, result); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - @Override - public void onFailure(Exception t) { - // the search context might not be cleared on the node where the fetch was executed for example - // because the action was rejected by the thread pool. in this case we need to send a dedicated - // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared - // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done. 
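(Aside: the docIdsToLoad structure that the removed handler below nulls out is built by regrouping the globally sorted ScoreDocs into per-shard buckets, where a shard that contributed no hits keeps a null slot; nulling a slot reuses that convention to mark "free this context later". A minimal sketch of the bucketing over plain Java collections, with hypothetical names; the PR's fillDocIdsToLoad returns hppc IntArrayList[] instead:)

import java.util.ArrayList;
import java.util.List;

final class DocIdBucketing {
    /** A hit tagged with the shard it came from and its doc id on that shard. */
    static final class ShardHit {
        final int shardIndex;
        final int doc;
        ShardHit(int shardIndex, int doc) {
            this.shardIndex = shardIndex;
            this.doc = doc;
        }
    }

    static List<Integer>[] groupByShard(int numShards, ShardHit[] sortedHits) {
        @SuppressWarnings("unchecked")
        List<Integer>[] docIdsToLoad = new List[numShards];
        for (ShardHit hit : sortedHits) {
            if (docIdsToLoad[hit.shardIndex] == null) {
                docIdsToLoad[hit.shardIndex] = new ArrayList<>();  // lazily created per-shard bucket
            }
            docIdsToLoad[hit.shardIndex].add(hit.doc);
        }
        return docIdsToLoad;                                       // a null entry means nothing to fetch
    }
}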
- docIdsToLoad.set(shardIndex, null); - onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter); - } - }); - } - - void onFetchFailure(Exception e, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, - AtomicInteger counter) { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); - } - this.addShardFailure(shardIndex, shardTarget, e); - successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - - private void finishHim() { - getExecutor().execute(new ActionRunnable(listener) { - @Override - public void doRun() throws IOException { - final boolean isScrollRequest = request.scroll() != null; - final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults, - fetchResults); - String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; - listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, - successfulOps.get(), buildTookInMillis(), buildShardFailures())); - releaseIrrelevantSearchContexts(firstResults, docIdsToLoad); - } - - @Override - public void onFailure(Exception e) { - try { - ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()); - if (logger.isDebugEnabled()) { - logger.debug("failed to reduce search", failure); - } - super.onFailure(failure); - } finally { - releaseIrrelevantSearchContexts(firstResults, docIdsToLoad); - } - } - }); + protected CheckedRunnable getNextPhase(AtomicArray initialResults) { + return new FetchPhase(initialResults, searchPhaseController); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index b21b0c54be3..9c69f1a763f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -199,7 +198,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch". 
*/ public SearchRequest searchType(String searchType) { - return searchType(SearchType.fromString(searchType, ParseFieldMatcher.EMPTY)); + return searchType(SearchType.fromString(searchType)); } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 3c320447fe8..865cf01430f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -26,13 +26,14 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.Script; +import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; @@ -503,6 +504,11 @@ public class SearchRequestBuilder extends ActionRequestBuilder docIdsToLoad = new AtomicArray<>(queryResults.length()); - searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs); - - if (docIdsToLoad.asList().isEmpty()) { + if (sortedShardDocs.length == 0) { finishHim(); return; } - + final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), sortedShardDocs); final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), sortedShardDocs, queryResults.length()); - final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size()); - for (final AtomicArray.Entry entry : docIdsToLoad.asList()) { - IntArrayList docIds = entry.value; - final QuerySearchResult querySearchResult = queryResults.get(entry.index); - ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index]; - ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc); - DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId()); - searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener() { - @Override - public void onResponse(FetchSearchResult result) { - result.shardTarget(querySearchResult.shardTarget()); - fetchResults.set(entry.index, result); - if (counter.decrementAndGet() == 0) { - finishHim(); + final AtomicInteger counter = new AtomicInteger(docIdsToLoad.length); + for (int i = 0; i < docIdsToLoad.length; i++) { + final int index = i; + final IntArrayList docIds = docIdsToLoad[index]; + if (docIds != null) { + final QuerySearchResult querySearchResult = queryResults.get(index); + ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[index]; + ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc); + DiscoveryNode node = nodes.get(querySearchResult.shardTarget().getNodeId()); + searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener() { + @Override + public void 
onResponse(FetchSearchResult result) { + result.shardTarget(querySearchResult.shardTarget()); + fetchResults.set(index, result); + if (counter.decrementAndGet() == 0) { + finishHim(); + } } - } - @Override - public void onFailure(Exception t) { - if (logger.isDebugEnabled()) { - logger.debug("Failed to execute fetch phase", t); - } - successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { - finishHim(); + @Override + public void onFailure(Exception t) { + if (logger.isDebugEnabled()) { + logger.debug("Failed to execute fetch phase", t); + } + successfulOps.decrementAndGet(); + if (counter.decrementAndGet() == 0) { + finishHim(); + } } + }); + } else { + // the counter is set to the total size of docIdsToLoad which can have null values so we have to count them down too + if (counter.decrementAndGet() == 0) { + finishHim(); } - }); + } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTask.java b/core/src/main/java/org/elasticsearch/action/search/SearchTask.java index 24f94a43319..d0a1cdd456f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTask.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -31,4 +31,9 @@ public class SearchTask extends CancellableTask { super(id, type, action, description, parentTaskId); } + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 5b052132566..c62699b6bab 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -25,9 +25,10 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -44,19 +45,23 @@ import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TaskAwareTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.function.Consumer; /** * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. 
*/ -public class SearchTransportService extends AbstractComponent { +public class SearchTransportService extends AbstractLifecycleComponent { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; @@ -66,21 +71,24 @@ public class SearchTransportService extends AbstractComponent { public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]"; public static final String QUERY_FETCH_ACTION_NAME = "indices:data/read/search[phase/query+fetch]"; - public static final String QUERY_QUERY_FETCH_ACTION_NAME = "indices:data/read/search[phase/query/query+fetch]"; public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; private final TransportService transportService; + private final RemoteClusterService remoteClusterService; - public SearchTransportService(Settings settings, TransportService transportService) { + public SearchTransportService(Settings settings, ClusterSettings clusterSettings, TransportService transportService) { super(settings); this.transportService = transportService; + this.remoteClusterService = new RemoteClusterService(settings, transportService); + clusterSettings.addAffixUpdateConsumer(RemoteClusterService.REMOTE_CLUSTERS_SEEDS, remoteClusterService::updateRemoteCluster, + (namespace, value) -> {}); } - public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) { - transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId), - new ActionListenerResponseHandler<>(new ActionListener() { + public void sendFreeContext(Transport.Connection connection, final long contextId, SearchRequest request) { + transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId), + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(new ActionListener() { @Override public void onResponse(SearchFreeContextResponse response) { // no need to respond if it was freed or not @@ -103,64 +111,62 @@ public class SearchTransportService extends AbstractComponent { new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE)); } - public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, + public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, DFS_ACTION_NAME, request, task, + transportService.sendChildRequest(connection, DFS_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, DfsSearchResult::new)); } - public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, + public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_ACTION_NAME, request, task, + transportService.sendChildRequest(connection, 
QUERY_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); } - public void sendExecuteQuery(DiscoveryNode node, final QuerySearchRequest request, SearchTask task, + public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_ID_ACTION_NAME, request, task, + transportService.sendChildRequest(connection, QUERY_ID_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); } public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_SCROLL_ACTION_NAME, request, task, + transportService.sendChildRequest(transportService.getConnection(node), QUERY_SCROLL_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, + public void sendExecuteFetch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_FETCH_ACTION_NAME, request, task, - new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); - } - - public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, SearchTask task, - final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request, task, + transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); } public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request, task, + transportService.sendChildRequest(transportService.getConnection(node), QUERY_FETCH_SCROLL_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request, SearchTask task, + public void sendExecuteFetch(Transport.Connection connection, final ShardFetchSearchRequest request, SearchTask task, final ActionListener listener) { - sendExecuteFetch(node, FETCH_ID_ACTION_NAME, request, task, listener); + sendExecuteFetch(connection, FETCH_ID_ACTION_NAME, request, task, listener); } public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, SearchTask task, final ActionListener listener) { - sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, task, listener); + sendExecuteFetch(transportService.getConnection(node), FETCH_ID_SCROLL_ACTION_NAME, request, task, listener); } - private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, SearchTask task, + private void sendExecuteFetch(Transport.Connection connection, String action, final ShardFetchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendChildRequest(node, action, request, task, + transportService.sendChildRequest(connection, action, request, task, new ActionListenerResponseHandler<>(listener, FetchSearchResult::new)); } + public RemoteClusterService 
getRemoteClusterService() { + return remoteClusterService; + } + static class ScrollFreeContextRequest extends TransportRequest { private long id; @@ -191,7 +197,7 @@ public class SearchTransportService extends AbstractComponent { static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest { private OriginalIndices originalIndices; - public SearchFreeContextRequest() { + SearchFreeContextRequest() { } SearchFreeContextRequest(SearchRequest request, long id) { @@ -265,6 +271,7 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(new SearchFreeContextResponse(freed)); } }); + TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new); transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME, new TaskAwareTransportRequestHandler() { @Override @@ -273,6 +280,7 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(new SearchFreeContextResponse(freed)); } }); + TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE, ThreadPool.Names.SAME, new TaskAwareTransportRequestHandler() { @@ -282,6 +290,9 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(TransportResponse.Empty.INSTANCE); } }); + TransportActionProxy.registerProxyAction(transportService, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, + () -> TransportResponse.Empty.INSTANCE); + transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -291,6 +302,8 @@ public class SearchTransportService extends AbstractComponent { } }); + TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, DfsSearchResult::new); + transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -299,6 +312,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME, QuerySearchResult::new); + transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -307,6 +322,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, QuerySearchResult::new); + transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -315,6 +332,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new); + transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -323,14 +342,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); - 
transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(QuerySearchRequest request, TransportChannel channel, Task task) throws Exception { - QueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); - channel.sendResponse(result); - } - }); + TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_ACTION_NAME, QueryFetchSearchResult::new); + transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -339,6 +352,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, ScrollQueryFetchSearchResult::new); + transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -347,6 +362,8 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, FetchSearchResult::new); + transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override @@ -355,6 +372,24 @@ public class SearchTransportService extends AbstractComponent { channel.sendResponse(result); } }); + TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new); + } + Transport.Connection getConnection(DiscoveryNode node) { + return transportService.getConnection(node); + } + + @Override + protected void doStart() { + // here we start to connect to the remote clusters + remoteClusterService.initializeRemoteClusters(); + } + + @Override + protected void doStop() {} + + @Override + protected void doClose() throws IOException { + remoteClusterService.close(); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchType.java b/core/src/main/java/org/elasticsearch/action/search/SearchType.java index 31535736957..8230a9148b8 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchType.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchType.java @@ -19,8 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.ParseFieldMatcher; - /** * Search type represent the manner at which the search operation is executed. * @@ -39,11 +37,7 @@ public enum SearchType { * are fetched. This is very handy when the index has a lot of shards (not replicas, shard id groups). */ QUERY_THEN_FETCH((byte) 1), - /** - * Same as {@link #QUERY_AND_FETCH}, except for an initial scatter phase which goes and computes the distributed - * term frequencies for more accurate scoring. - */ - DFS_QUERY_AND_FETCH((byte) 2), + // 2 used to be DFS_QUERY_AND_FETCH /** * The most naive (and possibly fastest) implementation is to simply execute the query on all relevant shards * and return the results. Each shard returns size results. 
Since each shard already returns size hits, this @@ -77,8 +71,6 @@ public enum SearchType { return DFS_QUERY_THEN_FETCH; } else if (id == 1) { return QUERY_THEN_FETCH; - } else if (id == 2) { - return DFS_QUERY_AND_FETCH; } else if (id == 3) { return QUERY_AND_FETCH; } else { @@ -91,14 +83,12 @@ public enum SearchType { * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch", * "query_then_fetch"/"queryThenFetch" and "query_and_fetch"/"queryAndFetch". */ - public static SearchType fromString(String searchType, ParseFieldMatcher parseFieldMatcher) { + public static SearchType fromString(String searchType) { if (searchType == null) { return SearchType.DEFAULT; } if ("dfs_query_then_fetch".equals(searchType)) { return SearchType.DFS_QUERY_THEN_FETCH; - } else if ("dfs_query_and_fetch".equals(searchType)) { - return SearchType.DFS_QUERY_AND_FETCH; } else if ("query_then_fetch".equals(searchType)) { return SearchType.QUERY_THEN_FETCH; } else if ("query_and_fetch".equals(searchType)) { diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 8070081dcd8..2aa0ad3c7be 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -22,21 +22,34 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchException; import org.elasticsearch.search.SearchShardTarget; import java.io.IOException; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; +import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; + /** * Represents a failure to search on a specific shard. 
*/ public class ShardSearchFailure implements ShardOperationFailedException { + private static final String REASON_FIELD = "reason"; + private static final String NODE_FIELD = "node"; + private static final String INDEX_FIELD = "index"; + private static final String SHARD_FIELD = "shard"; + public static final ShardSearchFailure[] EMPTY_ARRAY = new ShardSearchFailure[0]; private SearchShardTarget shardTarget; @@ -68,7 +81,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { this(reason, shardTarget, RestStatus.INTERNAL_SERVER_ERROR); } - public ShardSearchFailure(String reason, SearchShardTarget shardTarget, RestStatus status) { + private ShardSearchFailure(String reason, SearchShardTarget shardTarget, RestStatus status) { this.shardTarget = shardTarget; this.reason = reason; this.status = status; @@ -93,7 +106,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public String index() { if (shardTarget != null) { - return shardTarget.index(); + return shardTarget.getIndex(); } return null; } @@ -104,7 +117,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public int shardId() { if (shardTarget != null) { - return shardTarget.shardId().id(); + return shardTarget.getShardId().id(); } return -1; } @@ -153,20 +166,55 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("shard", shardId()); - builder.field("index", index()); + builder.field(SHARD_FIELD, shardId()); + builder.field(INDEX_FIELD, index()); if (shardTarget != null) { - builder.field("node", shardTarget.nodeId()); + builder.field(NODE_FIELD, shardTarget.getNodeId()); } if (cause != null) { - builder.field("reason"); + builder.field(REASON_FIELD); builder.startObject(); - ElasticsearchException.toXContent(builder, params, cause); + ElasticsearchException.generateThrowableXContent(builder, params, cause); builder.endObject(); } return builder; } + public static ShardSearchFailure fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + String currentFieldName = null; + int shardId = -1; + String indexName = null; + String nodeId = null; + ElasticsearchException exception = null; + while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (SHARD_FIELD.equals(currentFieldName)) { + shardId = parser.intValue(); + } else if (INDEX_FIELD.equals(currentFieldName)) { + indexName = parser.text(); + } else if (NODE_FIELD.equals(currentFieldName)) { + nodeId = parser.text(); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (REASON_FIELD.equals(currentFieldName)) { + exception = ElasticsearchException.fromXContent(parser); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + } + return new ShardSearchFailure(exception, + new SearchShardTarget(nodeId, new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId))); + } + @Override public Throwable getCause() { return cause; diff --git 
a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 48ee5cc288b..8f94c1e3769 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -26,8 +26,11 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -38,13 +41,17 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.BiConsumer; import java.util.function.Function; import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH; @@ -58,6 +65,7 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, Index[] concreteIndices) { + private Map buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, + Index[] concreteIndices, Map remoteAliasMap) { final Map aliasFilterMap = new HashMap<>(); for (Index index : concreteIndices) { clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); @@ -82,6 +92,7 @@ public class TransportSearchAction extends HandledTransportAction listener) { // pure paranoia if time goes backwards we are at least positive final long startTimeInMillis = Math.max(0, System.currentTimeMillis()); - ClusterState clusterState = clusterService.state(); - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + final String[] localIndices; + final Map> remoteClusterIndices; + final ClusterState clusterState = clusterService.state(); + if (remoteClusterService.isCrossClusterSearchEnabled()) { + remoteClusterIndices = remoteClusterService.groupClusterIndices( searchRequest.indices(), // empty string is not allowed + idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState)); + List remove = remoteClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY); + localIndices = remove == null ? 
Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]); + } else { + remoteClusterIndices = Collections.emptyMap(); + localIndices = searchRequest.indices(); + } + if (remoteClusterIndices.isEmpty()) { + executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, Collections.emptyList(), + (nodeId) -> null, clusterState, Collections.emptyMap(), listener); + } else { + remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices, + ActionListener.wrap((searchShardsResponses) -> { + List remoteShardIterators = new ArrayList<>(); + Map remoteAliasFilters = new HashMap<>(); + Function connectionFunction = remoteClusterService.processRemoteShards( + searchShardsResponses, remoteShardIterators, remoteAliasFilters); + executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, remoteShardIterators, + connectionFunction, clusterState, remoteAliasFilters, listener); + }, listener::onFailure)); + } + } + + private void executeSearch(SearchTask task, long startTimeInMillis, SearchRequest searchRequest, String[] localIndices, + List remoteShardIterators, Function remoteConnections, + ClusterState clusterState, Map remoteAliasMap, + ActionListener listener) { + + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead // of just for the _search api - Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), - startTimeInMillis, searchRequest.indices()); - Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices); + final Index[] indices; + if (localIndices.length == 0 && remoteShardIterators.size() > 0) { + indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified + } else { + indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), + startTimeInMillis, localIndices); + } + Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); String[] concreteIndices = new String[indices.length]; for (int i = 0; i < indices.length; i++) { concreteIndices[i] = indices[i].getName(); } - GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, + GroupShardsIterator localShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, searchRequest.preference()); + GroupShardsIterator shardIterators = mergeShardsIterators(localShardsIterator, remoteShardIterators); + failIfOverShardCountLimit(clusterService, shardIterators.size()); Map concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); @@ -142,7 +191,6 @@ public class TransportSearchAction extends HandledTransportAction connectionLookup = (nodeId) -> { + final DiscoveryNode discoveryNode = nodes.get(nodeId); + final Transport.Connection connection; + if (discoveryNode != null) { + connection = searchTransportService.getConnection(discoveryNode); + } else { + connection = remoteConnections.apply(nodeId); + } + if (connection == null) { + throw new IllegalStateException("no node found for id: " + nodeId); + } + return connection; + 
}; + + final ActionListener wrapper; + if (searchPhaseController.getSearchResponseListener().size() > 0) { + wrapper = ActionListener.wrap(searchResponse -> { + List> responseListeners = + searchPhaseController.getSearchResponseListener(); + for (BiConsumer respListener : responseListeners) { + respListener.accept(searchRequest, searchResponse); + } + listener.onResponse(searchResponse); + + }, listener::onFailure); + } else { + wrapper = listener; + } + searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(), + Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, wrapper).start(); + } + + private static GroupShardsIterator mergeShardsIterators(GroupShardsIterator localShardsIterator, + List remoteShardIterators) { + if (remoteShardIterators.isEmpty()) { + return localShardsIterator; + } + List shards = new ArrayList<>(); + for (ShardIterator shardIterator : remoteShardIterators) { + shards.add(shardIterator); + } + for (ShardIterator shardIterator : localShardsIterator) { + shards.add(shardIterator); + } + return new GroupShardsIterator(shards); } @Override @@ -160,31 +252,25 @@ public class TransportSearchAction extends HandledTransportAction aliasFilter, + long startTime, Function connectionLookup, + long clusterStateVersion, Map aliasFilter, Map concreteIndexBoosts, ActionListener listener) { - final Function nodesLookup = state.nodes()::get; - final long clusterStateVersion = state.version(); Executor executor = threadPool.executor(ThreadPool.Names.SEARCH); AbstractSearchAsyncAction searchAsyncAction; switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: - searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, clusterStateVersion, task); break; case QUERY_THEN_FETCH: - searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, - clusterStateVersion, task); - break; - case DFS_QUERY_AND_FETCH: - searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, clusterStateVersion, task); break; case QUERY_AND_FETCH: - searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, clusterStateVersion, task); break; @@ -194,7 +280,7 @@ public class TransportSearchAction extends HandledTransportAction shardCountLimit) { throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index a09a651086b..fdfa3205ea8 100644 --- 
a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -21,11 +21,9 @@ package org.elasticsearch.action.search; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.RAMOutputStream; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.ShardSearchTransportRequest; import java.io.IOException; import java.util.Base64; @@ -39,7 +37,7 @@ final class TransportSearchHelper { static String buildScrollId(SearchType searchType, AtomicArray searchPhaseResults) throws IOException { if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) { return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults); - } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) { + } else if (searchType == SearchType.QUERY_AND_FETCH) { return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults); } else { throw new IllegalStateException("search_type [" + searchType + "] not supported"); @@ -53,7 +51,7 @@ final class TransportSearchHelper { for (AtomicArray.Entry entry : searchPhaseResults.asList()) { SearchPhaseResult searchPhaseResult = entry.value; out.writeLong(searchPhaseResult.id()); - out.writeString(searchPhaseResult.shardTarget().nodeId()); + out.writeString(searchPhaseResult.shardTarget().getNodeId()); } byte[] bytes = new byte[(int) out.getFilePointer()]; out.writeTo(bytes, 0); diff --git a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java index 880d173b2fe..3e12d0cc842 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java @@ -47,7 +47,7 @@ public interface ActionFilter { * filter chain. This base class should serve any action filter implementations that don't need * to apply async filtering logic.
*/ - public abstract static class Simple extends AbstractComponent implements ActionFilter { + abstract class Simple extends AbstractComponent implements ActionFilter { protected Simple(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java b/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java index 56ba070b1aa..97e0c535bff 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java @@ -33,5 +33,5 @@ public interface ActionFilterChain listener); + void proceed(Task task, String action, Request request, ActionListener listener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java b/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java index ee2bc9a4b36..280ba6ac94d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java @@ -69,9 +69,10 @@ public class ActiveShardsObserver extends AbstractComponent { return; } - final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext()); - if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) { - onResult.accept(true); + final ClusterState state = clusterService.state(); + final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext()); + if (activeShardCount.enoughShardsActive(state, indexName)) { + onResult.accept(true); } else { final Predicate shardsAllocatedPredicate = newState -> activeShardCount.enoughShardsActive(newState, indexName); diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index a9a5afed9f3..2e442e2cc14 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; import java.util.ArrayList; @@ -63,18 +64,20 @@ public final class AutoCreateIndex { /** * Should the index be auto created? 
+ * @throws IndexNotFoundException if the index doesn't exist and shouldn't be auto created */ public boolean shouldAutoCreate(String index, ClusterState state) { + if (resolver.hasIndexOrAlias(index, state)) { + return false; + } // One volatile read, so that all checks are done against the same instance: final AutoCreate autoCreate = this.autoCreate; if (autoCreate.autoCreateIndex == false) { - return false; + throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] is [false]", index); } if (dynamicMappingDisabled) { - return false; - } - if (resolver.hasIndexOrAlias(index, state)) { - return false; + throw new IndexNotFoundException("no such index and [" + MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey() + "] is [false]", + index); } // matches not set, default value of "true" if (autoCreate.expressions.isEmpty()) { @@ -84,10 +87,15 @@ public final class AutoCreateIndex { String indexExpression = expression.v1(); boolean include = expression.v2(); if (Regex.simpleMatch(indexExpression, index)) { - return include; + if (include) { + return true; + } + throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] contains [-" + + indexExpression + "] which forbids automatic creation of the index", index); } } - return false; + throw new IndexNotFoundException("no such index and [" + AUTO_CREATE_INDEX_SETTING.getKey() + "] ([" + autoCreate + + "]) doesn't match", index); } AutoCreate getAutoCreate() { @@ -101,29 +109,33 @@ public final class AutoCreateIndex { static class AutoCreate { private final boolean autoCreateIndex; private final List> expressions; + private final String string; private AutoCreate(String value) { boolean autoCreateIndex; List> expressions = new ArrayList<>(); try { - autoCreateIndex = Booleans.parseBooleanExact(value); + autoCreateIndex = Booleans.parseBoolean(value); } catch (IllegalArgumentException ex) { try { String[] patterns = Strings.commaDelimitedListToStringArray(value); for (String pattern : patterns) { if (pattern == null || pattern.trim().length() == 0) { - throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must be either [true, false, or a comma separated list of index patterns]"); + throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must " + + "be either [true, false, or a comma separated list of index patterns]"); } pattern = pattern.trim(); Tuple expression; if (pattern.startsWith("-")) { if (pattern.length() == 1) { - throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must contain an index name after [-]"); + throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] " + + "must contain an index name after [-]"); } expression = new Tuple<>(pattern.substring(1), false); } else if(pattern.startsWith("+")) { if (pattern.length() == 1) { - throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must contain an index name after [+]"); + throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] " + + "must contain an index name after [+]"); } expression = new Tuple<>(pattern.substring(1), true); } else { @@ -139,6 +151,7 @@ public final class AutoCreateIndex { } this.expressions = expressions; this.autoCreateIndex = autoCreateIndex; + this.string = value; } boolean isAutoCreateIndex() {
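// The expression list built above gives action.auto_create_index
// first-match-wins semantics: for a value like "+aaa*,-bbb*" the first
// pattern matching the index name decides, and an unmatched name is rejected
// (the real method now throws IndexNotFoundException naming the setting, as
// shown). A compact sketch of just the matching step, with simpleMatch as a
// simplified stand-in for Regex.simpleMatch:

import java.util.List;
import java.util.regex.Pattern;

final class AutoCreateMatchSketch {
    static boolean shouldAutoCreate(String index, List<String> patterns) {
        for (String pattern : patterns) {
            boolean include = !pattern.startsWith("-");
            String glob = (pattern.startsWith("+") || pattern.startsWith("-"))
                    ? pattern.substring(1) : pattern;
            if (simpleMatch(glob, index)) {
                return include; // first matching expression wins
            }
        }
        return false; // nothing matched: do not auto create
    }

    static boolean simpleMatch(String glob, String value) {
        // treat '*' as the only wildcard and quote everything else
        StringBuilder regex = new StringBuilder();
        String[] parts = glob.split("\\*", -1);
        for (int i = 0; i < parts.length; i++) {
            if (i > 0) {
                regex.append(".*");
            }
            regex.append(Pattern.quote(parts[i]));
        }
        return Pattern.matches(regex.toString(), value);
    }
}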
@@ -148,5 +161,10 @@ public final class AutoCreateIndex { List> getExpressions() { return expressions; } + + @Override + public String toString() { + return string; + } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/ContextPreservingActionListener.java b/core/src/main/java/org/elasticsearch/action/support/ContextPreservingActionListener.java new file mode 100644 index 00000000000..a4293bfb485 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/ContextPreservingActionListener.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.function.Supplier; + +/** + * Restores the given {@link org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext} + * once the listener is invoked + */ +public final class ContextPreservingActionListener implements ActionListener { + + private final ActionListener delegate; + private final Supplier context; + + public ContextPreservingActionListener(Supplier contextSupplier, ActionListener delegate) { + this.delegate = delegate; + this.context = contextSupplier; + } + + @Override + public void onResponse(R r) { + try (ThreadContext.StoredContext ignore = context.get()) { + delegate.onResponse(r); + } + } + + @Override + public void onFailure(Exception e) { + try (ThreadContext.StoredContext ignore = context.get()) { + delegate.onFailure(e); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 3f7df803e24..2ced9145674 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -125,7 +125,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile if (reason != null) { builder.field("reason"); builder.startObject(); - ElasticsearchException.toXContent(builder, params, reason); + ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); } return builder; diff --git a/core/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/core/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 2bc49f7e9f8..b82bfcc7170 100644 --- a/core/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/core/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -26,7 +26,7 @@ import org.elasticsearch.rest.RestRequest; import java.io.IOException; import java.util.Map; 
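// ContextPreservingActionListener above captures a stored thread context at
// wrap time and reinstalls it around each callback, so per-request state
// survives the async hop. The same wrap/restore shape in plain Java, with
// AutoCloseable standing in for ThreadContext.StoredContext:

import java.util.function.Consumer;
import java.util.function.Supplier;

final class ContextPreservingCallback<T> {
    private final Supplier<AutoCloseable> context;
    private final Consumer<T> delegate;

    ContextPreservingCallback(Supplier<AutoCloseable> context, Consumer<T> delegate) {
        this.context = context;
        this.delegate = delegate;
    }

    void onResult(T result) throws Exception {
        // restore the captured context, run the callback, then revert
        try (AutoCloseable ignore = context.get()) {
            delegate.accept(result);
        }
    }
}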
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; /** @@ -195,8 +195,8 @@ public class IndicesOptions { //note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return fromOptions( - lenientNodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()), - lenientNodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()), + nodeBooleanValue(ignoreUnavailableString, "ignore_unavailable", defaultSettings.ignoreUnavailable()), + nodeBooleanValue(allowNoIndicesString, "allow_no_indices", defaultSettings.allowNoIndices()), expandWildcardsOpen, expandWildcardsClosed, defaultSettings.allowAliasesToMultipleIndices(), @@ -279,7 +279,7 @@ public class IndicesOptions { ", allow_no_indices=" + allowNoIndices() + ", expand_wildcards_open=" + expandWildcardsOpen() + ", expand_wildcards_closed=" + expandWildcardsClosed() + - ", allow_alisases_to_multiple_indices=" + allowAliasesToMultipleIndices() + + ", allow_aliases_to_multiple_indices=" + allowAliasesToMultipleIndices() + ", forbid_closed_indices=" + forbidClosedIndices() + ']'; } diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index e8f4d943e95..22edbfca2dc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; @@ -43,7 +42,6 @@ public abstract class TransportAction() { @Override public ShardResponse newInstance() { diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 9f11b9b5a70..951c5dbc3ec 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -318,7 +318,6 @@ public abstract class TransportBroadcastByNodeAction() { @Override @@ -439,7 +438,6 @@ public abstract class TransportBroadcastByNodeAction exceptions; protected List results; - public NodeResponse() { + NodeResponse() { } - public NodeResponse(String nodeId, + NodeResponse(String nodeId, int totalShards, List results, List exceptions) { diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index ecb03b5c222..f2bc4da423d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -121,12 +121,12 @@ public abstract 
class TransportMasterNodeAction masterChangePredicate = MasterNodeChangePredicate.build(clusterState); final DiscoveryNodes nodes = clusterState.nodes(); if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { @@ -160,7 +160,6 @@ public abstract class TransportMasterNodeAction(listener, TransportMasterNodeAction.this::newResponse) { @Override public void handleException(final TransportException exp) { @@ -197,7 +195,7 @@ public abstract class TransportMasterNodeAction listener); + protected abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, ActionListener listener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 6cc063d5af1..d8010f4381f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -199,7 +199,6 @@ public abstract class TransportNodesAction> extends ReplicationRequest implements WriteRequest { private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; + private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + /** * Constructor for deserialization. */ @@ -62,11 +66,32 @@ public abstract class ReplicatedWriteRequest public void readFrom(StreamInput in) throws IOException { super.readFrom(in); refreshPolicy = RefreshPolicy.readFrom(in); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + seqNo = in.readZLong(); + } else { + seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); refreshPolicy.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + out.writeZLong(seqNo); + } + } + + /** + * Returns the sequence number for this operation. The sequence number is assigned while the operation + * is performed on the primary shard. + */ + public long getSeqNo() { + return seqNo; + } + + /** sets the sequence number for this operation. should only be called on the primary shard */ + public void setSeqNo(long seqNo) { + this.seqNo = seqNo; } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 47284789850..6a3d217fcf6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -202,7 +202,7 @@ public class ReplicationOperation< shard, replicaRequest), replicaException); - if (ignoreReplicaException(replicaException)) { + if (TransportActions.isShardNotAvailableException(replicaException)) { decPendingAndFinishIfNeeded(); } else { RestStatus restStatus = ExceptionsHelper.status(replicaException); @@ -283,7 +283,7 @@ public class ReplicationOperation< } private void decPendingAndFinishIfNeeded() { - assert pendingActions.get() > 0; + assert pendingActions.get() > 0 : "pending action count goes below 0 for request [" + request + "]"; if (pendingActions.decrementAndGet() == 0) { finish(); } @@ -314,30 +314,6 @@ public class ReplicationOperation< } } - - /** - * Should an exception be ignored when the operation is performed on the replica. 
- */ - public static boolean ignoreReplicaException(Exception e) { - if (TransportActions.isShardNotAvailableException(e)) { - return true; - } - // on version conflict or document missing, it means - // that a new change has crept into the replica, and it's fine - if (isConflictException(e)) { - return true; - } - return false; - } - - public static boolean isConflictException(Throwable t) { - final Throwable cause = ExceptionsHelper.unwrapCause(t); - // on version conflict or document missing, it means - // that a new change has crept into the replica, and it's fine - return cause instanceof VersionConflictEngineException; - } - - public interface Primary< Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index d520b3d4e70..091f96c408f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -55,7 +55,6 @@ public abstract class ReplicationRequest failuresList = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (TOTAL.equals(currentFieldName)) { + total = parser.intValue(); + } else if (SUCCESSFUL.equals(currentFieldName)) { + successful = parser.intValue(); + } else if (FAILED.equals(currentFieldName) == false) { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (FAILURES.equals(currentFieldName)) { + failuresList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + failuresList.add(Failure.fromXContent(parser)); + } + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } + } + Failure[] failures = EMPTY; + if (failuresList != null) { + failures = failuresList.toArray(new Failure[failuresList.size()]); + } + return new ShardInfo(total, successful, failures); + } + @Override public String toString() { return "ShardInfo{" + @@ -201,7 +244,7 @@ public class ReplicationResponse extends ActionResponse { return shardInfo; } - public static class Failure implements ShardOperationFailedException, ToXContent { + public static class Failure implements ShardOperationFailedException, ToXContentObject { private static final String _INDEX = "_index"; private static final String _SHARD = "_shard"; @@ -331,13 +374,52 @@ public class ReplicationResponse extends ActionResponse { builder.field(_NODE, nodeId); builder.field(REASON); builder.startObject(); - ElasticsearchException.toXContent(builder, params, cause); + ElasticsearchException.generateThrowableXContent(builder, params, cause); builder.endObject(); builder.field(STATUS, status); builder.field(PRIMARY, primary); builder.endObject(); return builder; } + + public static Failure fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); + + String shardIndex = null, nodeId = null; + int shardId = -1; + boolean primary = false; + RestStatus status = null; + ElasticsearchException reason = null; + + String 
currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (_INDEX.equals(currentFieldName)) { + shardIndex = parser.text(); + } else if (_SHARD.equals(currentFieldName)) { + shardId = parser.intValue(); + } else if (_NODE.equals(currentFieldName)) { + nodeId = parser.text(); + } else if (STATUS.equals(currentFieldName)) { + status = RestStatus.valueOf(parser.text()); + } else if (PRIMARY.equals(currentFieldName)) { + primary = parser.booleanValue(); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (REASON.equals(currentFieldName)) { + reason = ElasticsearchException.fromXContent(parser); + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } + } + return new Failure(new ShardId(shardIndex, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), nodeId, reason, status, primary); + } } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index e33d10eaa25..8193cf77ceb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -119,7 +119,6 @@ public abstract class TransportBroadcastReplicationAction shardActionListener) { ShardRequest shardRequest = newShardRequest(request, shardId); shardRequest.setParentTask(clusterService.localNode().getId(), task.getId()); - taskManager.registerChildTask(task, clusterService.localNode().getId()); replicatedBroadcastShardAction.execute(shardRequest, shardActionListener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 8cff657e3dd..32f5b8f0f2c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.support.replication; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionResponse; @@ -43,6 +44,7 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -57,6 +59,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.indices.IndexClosedException; import 
org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -170,7 +173,8 @@ public abstract class TransportReplicationAction< * @param shardRequest the request to the primary shard * @param primary the primary shard to perform the operation on */ - protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard primary) throws Exception; + protected abstract PrimaryResult shardOperationOnPrimary( + Request shardRequest, IndexShard primary) throws Exception; /** * Synchronous replica operation on nodes with replica copies. This is done under the lock form @@ -182,17 +186,19 @@ public abstract class TransportReplicationAction< protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica) throws Exception; /** - * Cluster level block to check before request execution + * Cluster level block to check before request execution. Returning null means that no blocks need to be checked. */ + @Nullable protected ClusterBlockLevel globalBlockLevel() { - return ClusterBlockLevel.WRITE; + return null; } /** - * Index level block to check before request execution + * Index level block to check before request execution. Returning null means that no blocks need to be checked. */ + @Nullable protected ClusterBlockLevel indexBlockLevel() { - return ClusterBlockLevel.WRITE; + return null; } /** @@ -309,7 +315,7 @@ public abstract class TransportReplicationAction< } else { setPhase(replicationTask, "primary"); final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); - final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings()); + final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData); final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(request, ActionListener.wrap(result -> result.respond(listener), listener::onFailure), @@ -359,8 +365,8 @@ public abstract class TransportReplicationAction< }; } - protected ReplicationOperation createReplicatedOperation( - Request request, ActionListener listener, + protected ReplicationOperation> createReplicatedOperation( + Request request, ActionListener> listener, PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, replicasProxy, clusterService::state, logger, actionName @@ -368,10 +374,12 @@ public abstract class TransportReplicationAction< } } - protected class PrimaryResult implements ReplicationOperation.PrimaryResult { + protected static class PrimaryResult, + Response extends ReplicationResponse> + implements ReplicationOperation.PrimaryResult { final ReplicaRequest replicaRequest; - final Response finalResponseIfSuccessful; - final Exception finalFailure; + public final Response finalResponseIfSuccessful; + public final Exception finalFailure; /** * Result of executing a primary operation @@ -411,7 +419,7 @@ public abstract class TransportReplicationAction< } } - protected class ReplicaResult { + protected static class ReplicaResult { final Exception finalFailure; public ReplicaResult(Exception finalFailure) { @@ -506,16 +514,15 @@ public abstract class TransportReplicationAction< request), e); request.onRetry(); - final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); 
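// The retry path here parks a stale request on a ClusterStateObserver and
// re-dispatches once a new cluster state arrives instead of failing outright.
// A minimal sketch of that wait-then-retry loop, assuming a hypothetical
// observer interface rather than the real ClusterStateObserver API:

import java.util.function.LongConsumer;
import java.util.function.LongPredicate;

final class WaitForStateRetrySketch {
    interface StateObserver {
        // invokes the callback once with the next observed state version
        void waitForNextChange(LongConsumer onNewStateVersion);
    }

    static void run(final StateObserver observer, final LongPredicate attempt, long stateVersion) {
        if (attempt.test(stateVersion)) {
            return; // the operation succeeded against this cluster state
        }
        // stale state or transient block: retry as soon as the state changes
        observer.waitForNextChange(newVersion -> run(observer, attempt, newVersion));
    }
}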
observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { - context.close(); // Forking a thread on local node via transport service so that custom transport service have an // opportunity to execute custom logic before the replica operation begins String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]"; TransportChannelResponseHandler handler = - new TransportChannelResponseHandler<>(logger, channel, extraMessage, () -> TransportResponse.Empty.INSTANCE); + new TransportChannelResponseHandler<>(logger, channel, extraMessage, + () -> TransportResponse.Empty.INSTANCE); transportService.sendRequest(clusterService.localNode(), transportReplicaAction, new ConcreteShardRequest<>(request, targetAllocationID), handler); @@ -568,7 +575,7 @@ public abstract class TransportReplicationAction< private class ResponseListener implements ActionListener { private final ReplicaResponse replicaResponse; - public ResponseListener(ReplicaResponse replicaResponse) { + ResponseListener(ReplicaResponse replicaResponse) { this.replicaResponse = replicaResponse; } @@ -630,7 +637,7 @@ public abstract class TransportReplicationAction< @Override protected void doRun() { setPhase(task, "routing"); - final ClusterState state = observer.observedState(); + final ClusterState state = observer.setAndGetObservedState(); if (handleBlockExceptions(state)) { return; } @@ -642,6 +649,9 @@ public abstract class TransportReplicationAction< retry(new IndexNotFoundException(concreteIndex)); return; } + if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + throw new IndexClosedException(indexMetaData.getIndex()); + } // resolve all derived request fields, so we can route and apply it resolveRequest(state.metaData(), indexMetaData, request); @@ -653,7 +663,6 @@ public abstract class TransportReplicationAction< return; } final DiscoveryNode node = state.nodes().get(primary.currentNodeId()); - taskManager.registerChildTask(task, node.getId()); if (primary.currentNodeId().equals(state.nodes().getLocalNodeId())) { performLocalAction(state, primary, node); } else { @@ -664,7 +673,7 @@ public abstract class TransportReplicationAction< private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node) { setPhase(task, "waiting_on_primary"); if (logger.isTraceEnabled()) { - logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", + logger.trace("send action [{}] to local primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId()); } performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId())); @@ -718,15 +727,21 @@ public abstract class TransportReplicationAction< } private boolean handleBlockExceptions(ClusterState state) { - ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel()); - if (blockException != null) { - handleBlockException(blockException); - return true; + ClusterBlockLevel globalBlockLevel = globalBlockLevel(); + if (globalBlockLevel != null) { + ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel); + if (blockException != null) { + handleBlockException(blockException); + return true; + } } - blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex(state)); - if 
(blockException != null) { - handleBlockException(blockException); - return true; + ClusterBlockLevel indexBlockLevel = indexBlockLevel(); + if (indexBlockLevel != null) { + ClusterBlockException blockException = state.blocks().indexBlockedException(indexBlockLevel, concreteIndex(state)); + if (blockException != null) { + handleBlockException(blockException); + return true; + } } return false; } @@ -793,11 +808,9 @@ public abstract class TransportReplicationAction< } setPhase(task, "waiting_for_retry"); request.onRetry(); - final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { - context.close(); run(); } @@ -808,7 +821,6 @@ public abstract class TransportReplicationAction< @Override public void onTimeout(TimeValue timeout) { - context.close(); // Try one more time... run(); } @@ -898,8 +910,8 @@ public abstract class TransportReplicationAction< * Indicates whether this operation should be replicated to shadow replicas or not. If this method returns false the replication phase * will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do. */ - protected boolean shouldExecuteReplication(Settings settings) { - return IndexMetaData.isIndexUsingShadowReplicas(settings) == false; + protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) { + return indexMetaData.isIndexUsingShadowReplicas() == false; } class ShardReference implements Releasable { @@ -927,7 +939,8 @@ public abstract class TransportReplicationAction< } - class PrimaryShardReference extends ShardReference implements ReplicationOperation.Primary { + class PrimaryShardReference extends ShardReference + implements ReplicationOperation.Primary> { PrimaryShardReference(IndexShard indexShard, Releasable operationLock) { super(indexShard, operationLock); @@ -950,6 +963,8 @@ public abstract class TransportReplicationAction< public PrimaryResult perform(Request request) throws Exception { PrimaryResult result = shardOperationOnPrimary(request, indexShard); if (result.replicaRequest() != null) { + assert result.finalFailure == null : "a replica request [" + result.replicaRequest() + + "] with a primary failure [" + result.finalFailure + "]"; result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); } return result; @@ -983,16 +998,25 @@ public abstract class TransportReplicationAction< @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - localCheckpoint = in.readZLong(); - allocationId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + super.readFrom(in); + localCheckpoint = in.readZLong(); + allocationId = in.readString(); + } else { + // 5.x used to read empty responses, which don't really read anything off the stream, so just do nothing.
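// readFrom/writeTo here gate the new fields on the peer's wire version:
// 6.0+ peers exchange localCheckpoint and allocationId, while 5.x peers keep
// seeing the old empty response. The same back-compat idiom reduced to plain
// Java, with DataInput/DataOutput and an int version as stand-ins for
// StreamInput/StreamOutput and Version:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class VersionGatedWireSketch {
    static final int V_6_0_0 = 6_000_000; // illustrative wire-version constant
    static final long ABSENT = -1L;       // default when the peer never sent the field

    static void writeCheckpoint(DataOutput out, int peerVersion, long checkpoint) throws IOException {
        if (peerVersion >= V_6_0_0) {
            out.writeLong(checkpoint);    // only peers that understand the field get it
        }                                 // older peers see the old, empty payload
    }

    static long readCheckpoint(DataInput in, int peerVersion) throws IOException {
        return peerVersion >= V_6_0_0 ? in.readLong() : ABSENT;
    }
}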
+ } } @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeZLong(localCheckpoint); - out.writeString(allocationId); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + super.writeTo(out); + out.writeZLong(localCheckpoint); + out.writeString(allocationId); + } else { + // we used to write empty responses + Empty.INSTANCE.writeTo(out); + } } @Override @@ -1016,10 +1040,9 @@ listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]")); return; } - transportService.sendRequest(node, transportReplicaAction, - new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions, - // Eclipse can't handle when this is <> so we specify the type here. - new ActionListenerResponseHandler(listener, ReplicaResponse::new)); + final ConcreteShardRequest concreteShardRequest = + new ConcreteShardRequest<>(request, replica.allocationId().getId()); + sendReplicaRequest(concreteShardRequest, node, listener); } @Override @@ -1060,6 +1083,14 @@ } } + /** sends the given replica request to the supplied node */ + protected void sendReplicaRequest(ConcreteShardRequest concreteShardRequest, DiscoveryNode node, + ActionListener listener) { + transportService.sendRequest(node, transportReplicaAction, concreteShardRequest, transportOptions, + // Eclipse can't handle when this is <> so we specify the type here. + new ActionListenerResponseHandler(listener, ReplicaResponse::new)); + } + /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/ public static final class ConcreteShardRequest extends TransportRequest { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 15f269c46f5..8569b28257f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -21,10 +21,13 @@ package org.elasticsearch.action.support.replication; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -67,7 +70,8 @@ public abstract class TransportWriteAction< * async refresh is performed on the primary shard according to the Request refresh policy */ @Override - protected abstract WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception; + protected abstract WritePrimaryResult shardOperationOnPrimary( + Request request, IndexShard primary) throws Exception; /** * Called once per replica with a reference to the replica {@linkplain IndexShard} to modify.
@@ -76,19 +80,24 @@ public abstract class TransportWriteAction< * async refresh is performed on the replica shard according to the ReplicaRequest refresh policy */ @Override - protected abstract WriteReplicaResult shardOperationOnReplica(ReplicaRequest request, IndexShard replica) throws Exception; + protected abstract WriteReplicaResult shardOperationOnReplica( + ReplicaRequest request, IndexShard replica) throws Exception; /** * Result of taking the action on the primary. */ - protected class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + protected static class WritePrimaryResult, + Response extends ReplicationResponse & WriteResponse> extends PrimaryResult + implements RespondingWriteResult { boolean finishedAsyncActions; + public final Location location; ActionListener listener = null; public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse, @Nullable Location location, @Nullable Exception operationFailure, - IndexShard primary) { + IndexShard primary, Logger logger) { super(request, finalResponse, operationFailure); + this.location = location; assert location == null || operationFailure == null : "expected either failure to be null or translog location to be null, " + "but found: [" + location + "] translog location and [" + operationFailure + "] failure"; @@ -138,13 +147,16 @@ public abstract class TransportWriteAction< /** * Result of taking the action on the replica. */ - protected class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + protected static class WriteReplicaResult> + extends ReplicaResult implements RespondingWriteResult { + public final Location location; boolean finishedAsyncActions; private ActionListener listener; public WriteReplicaResult(ReplicaRequest request, @Nullable Location location, - @Nullable Exception operationFailure, IndexShard replica) { + @Nullable Exception operationFailure, IndexShard replica, Logger logger) { super(operationFailure); + this.location = location; if (operationFailure != null) { this.finishedAsyncActions = true; } else { @@ -184,6 +196,16 @@ public abstract class TransportWriteAction< } } + @Override + protected ClusterBlockLevel globalBlockLevel() { + return ClusterBlockLevel.WRITE; + } + + @Override + protected ClusterBlockLevel indexBlockLevel() { + return ClusterBlockLevel.WRITE; + } + /** * callback used by {@link AsyncAfterWriteAction} to notify that all post * process actions have been executed diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 7670eab102a..421e2458b0d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -111,7 +111,6 @@ public abstract class TransportInstanceSingleOperationAction listener) { this.request = request; @@ -119,13 +118,12 @@ public abstract class TransportInstanceSingleOperationAction() { @Override @@ -221,18 +219,18 @@ public abstract class TransportInstanceSingleOperationAction() { @Override @@ -403,10 +402,10 @@ public abstract class TransportTasksAction< protected List exceptions; protected List results; - public NodeTasksResponse() { + NodeTasksResponse() { } - public NodeTasksResponse(String nodeId, + NodeTasksResponse(String 
nodeId, List results, List exceptions) { this.nodeId = nodeId; diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 233d4b0c638..8508c834a9f 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -24,14 +24,14 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; import java.util.Iterator; -public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContent { +public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContentObject { /** * Represents a failure. @@ -124,6 +124,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startArray(Fields.DOCS); for (MultiTermVectorsItemResponse response : responses) { if (response.isFailed()) { @@ -132,16 +133,15 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable builder.field(Fields._INDEX, failure.getIndex()); builder.field(Fields._TYPE, failure.getType()); builder.field(Fields._ID, failure.getId()); - ElasticsearchException.renderException(builder, params, failure.getCause()); + ElasticsearchException.generateFailureXContent(builder, params, failure.getCause(), true); builder.endObject(); } else { TermVectorsResponse getResponse = response.getResponse(); - builder.startObject(); getResponse.toXContent(builder, params); - builder.endObject(); } } builder.endArray(); + builder.endObject(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 534ef4164e2..088691a5c9c 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -200,7 +200,7 @@ public final class TermVectorsFields extends Fields { private long sumDocFreq; private int docCount; - public TermVector(BytesReference termVectors, long readOffset) throws IOException { + TermVector(BytesReference termVectors, long readOffset) throws IOException { this.perFieldTermVectorInput = termVectors.streamInput(); this.readOffset = readOffset; reset(); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index b83713e3a6a..cba14fecf9c 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; @@ -33,7 +34,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import java.io.IOException; @@ -63,6 +66,8 @@ public class TermVectorsRequest extends SingleShardRequest i private BytesReference doc; + private XContentType xContentType; + private String routing; private String parent; @@ -156,8 +161,9 @@ public class TermVectorsRequest extends SingleShardRequest i super(other.index()); this.id = other.id(); this.type = other.type(); - if (this.doc != null) { + if (other.doc != null) { this.doc = new BytesArray(other.doc().toBytesRef(), true); + this.xContentType = other.xContentType; } this.flagsEnum = other.getFlags().clone(); this.preference = other.preference(); @@ -225,22 +231,36 @@ public class TermVectorsRequest extends SingleShardRequest i return doc; } - /** - * Sets an artificial document from which term vectors are requested for. - */ - public TermVectorsRequest doc(XContentBuilder documentBuilder) { - return this.doc(documentBuilder.bytes(), true); + public XContentType xContentType() { + return xContentType; } /** * Sets an artificial document from which term vectors are requested for. */ + public TermVectorsRequest doc(XContentBuilder documentBuilder) { + return this.doc(documentBuilder.bytes(), true, documentBuilder.contentType()); + } + + /** + * Sets an artificial document from which term vectors are requested for. + * @deprecated use {@link #doc(BytesReference, boolean, XContentType)} to avoid content auto detection + */ + @Deprecated public TermVectorsRequest doc(BytesReference doc, boolean generateRandomId) { + return this.doc(doc, generateRandomId, XContentFactory.xContentType(doc)); + } + + /** + * Sets an artificial document from which term vectors are requested for. 
+ */ + public TermVectorsRequest doc(BytesReference doc, boolean generateRandomId, XContentType xContentType) { // assign a random id to this artificial document, for routing if (generateRandomId) { this.id(String.valueOf(randomInt.getAndAdd(1))); } this.doc = doc; + this.xContentType = xContentType; return this; } @@ -479,6 +499,11 @@ public class TermVectorsRequest extends SingleShardRequest i if (in.readBoolean()) { doc = in.readBytesReference(); + if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType = XContentType.readFrom(in); + } else { + xContentType = XContentFactory.xContentType(doc); + } } routing = in.readOptionalString(); parent = in.readOptionalString(); @@ -519,6 +544,9 @@ public class TermVectorsRequest extends SingleShardRequest i out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); + if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting + xContentType.writeTo(out); + } } out.writeOptionalString(routing); out.writeOptionalString(parent); @@ -549,7 +577,7 @@ public class TermVectorsRequest extends SingleShardRequest i out.writeLong(version); } - public static enum Flag { + public enum Flag { // Do not change the order of these flags we use // the ordinal for encoding! Only append to the end! Positions, Offsets, Payloads, FieldStatistics, TermStatistics diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 964aa00b5c3..c63400be7e9 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -36,7 +36,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -46,7 +46,7 @@ import java.util.EnumSet; import java.util.Iterator; import java.util.Set; -public class TermVectorsResponse extends ActionResponse implements ToXContent { +public class TermVectorsResponse extends ActionResponse implements ToXContentObject { private static class FieldStrings { // term statistics strings @@ -174,6 +174,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent { assert index != null; assert type != null; assert id != null; + builder.startObject(); builder.field(FieldStrings._INDEX, index); builder.field(FieldStrings._TYPE, type); if (!isArtificial()) { @@ -182,15 +183,15 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent { builder.field(FieldStrings._VERSION, docVersion); builder.field(FieldStrings.FOUND, isExists()); builder.field(FieldStrings.TOOK, tookInMillis); - if (!isExists()) { - return builder; - } - builder.startObject(FieldStrings.TERM_VECTORS); - final CharsRefBuilder spare = new CharsRefBuilder(); - Fields theFields = getFields(); - Iterator fieldIter = theFields.iterator(); - while (fieldIter.hasNext()) { - buildField(builder, spare, theFields, fieldIter); + if (isExists()) { + builder.startObject(FieldStrings.TERM_VECTORS); + final CharsRefBuilder 
spare = new CharsRefBuilder(); + Fields theFields = getFields(); + Iterator fieldIter = theFields.iterator(); + while (fieldIter.hasNext()) { + buildField(builder, spare, theFields, fieldIter); + } + builder.endObject(); } builder.endObject(); return builder; diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 45e4aa8afdd..0235dd95a4b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -19,19 +19,17 @@ package org.elasticsearch.action.update; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.TransportActions; @@ -63,11 +61,12 @@ import java.util.Collections; import java.util.Map; import static org.elasticsearch.ExceptionsHelper.unwrapCause; +import static org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction.toSingleItemBulkRequest; +import static org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction.wrapBulkResponse; public class TransportUpdateAction extends TransportInstanceSingleOperationAction { - private final TransportDeleteAction deleteAction; - private final TransportIndexAction indexAction; + private final TransportBulkAction bulkAction; private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; private final UpdateHelper updateHelper; @@ -75,12 +74,10 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio @Inject public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - TransportIndexAction indexAction, TransportDeleteAction deleteAction, TransportCreateIndexAction createIndexAction, - UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - IndicesService indicesService, AutoCreateIndex autoCreateIndex) { + TransportBulkAction bulkAction, TransportCreateIndexAction createIndexAction, UpdateHelper updateHelper, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, AutoCreateIndex autoCreateIndex) { super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new); - this.indexAction = indexAction; - this.deleteAction = deleteAction; + this.bulkAction = bulkAction; this.createIndexAction = createIndexAction; this.updateHelper = updateHelper; 
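// The update path now funnels its index/delete through the bulk action and
// retries version conflicts via handleUpdateFailureWithRetry (further below),
// re-running the whole update up to retryOnConflict times. The bounded-retry
// shape in isolation, with a stand-in exception type; note the real code
// re-submits to the thread pool instead of looping inline:

final class RetryOnConflictSketch {
    static class VersionConflictException extends RuntimeException {}

    static void execute(Runnable operation, int retryOnConflict) {
        for (int attempt = 0; ; attempt++) {
            try {
                operation.run();
                return; // success, no retry needed
            } catch (VersionConflictException e) {
                if (attempt >= retryOnConflict) {
                    throw e; // retries exhausted: surface the conflict
                }
                // otherwise loop: the next attempt reads the newer document
            }
        }
    }
}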
this.indicesService = indicesService; @@ -162,7 +159,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio return new PlainShardIterator(shardIterator.shardId(), Collections.singletonList(shard)); } } - return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); + return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); } @Override @@ -180,100 +177,46 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio IndexRequest upsertRequest = result.action(); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference upsertSourceBytes = upsertRequest.source(); - indexAction.execute(upsertRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); - if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || - (request.fields() != null && request.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); - } else { - update.setGetResult(null); - } - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if (retryCount < request.retryOnConflict()) { - logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", - retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; + bulkAction.execute(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( + ActionListener.wrap(response -> { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || + (request.fields() != null && request.fields().length > 0)) { + Tuple> sourceAndContent = + XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); + } else { + update.setGetResult(null); } - } - listener.onFailure(cause instanceof Exception ? 
(Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); + }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) + ); + break; case UPDATED: IndexRequest indexRequest = result.action(); // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request final BytesReference indexSourceBytes = indexRequest.source(); - indexAction.execute(indexRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if (retryCount < request.retryOnConflict()) { - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; - } - } - listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); + bulkAction.execute(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( + ActionListener.wrap(response -> { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); + }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) + ); break; case DELETED: DeleteRequest deleteRequest = result.action(); - deleteAction.execute(deleteRequest, new ActionListener() { - @Override - public void onResponse(DeleteResponse response) { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); - update.setForcedRefresh(response.forcedRefresh()); - listener.onResponse(update); - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = unwrapCause(e); - if (cause instanceof VersionConflictEngineException) { - if (retryCount < request.retryOnConflict()) { - threadPool.executor(executor()).execute(new ActionRunnable(listener) { - @Override - protected void doRun() { - shardOperation(request, listener, retryCount + 1); - } - }); - return; - } - } - listener.onFailure(cause instanceof Exception ? 
(Exception) cause : new NotSerializableExceptionWrapper(cause)); - } - }); + bulkAction.execute(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( + ActionListener.wrap(response -> { + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); + update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setForcedRefresh(response.forcedRefresh()); + listener.onResponse(update); + }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) + ); break; case NOOP: UpdateResponse update = result.action(); @@ -290,4 +233,23 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio throw new IllegalStateException("Illegal result " + result.getResponseResult()); } } + + private void handleUpdateFailureWithRetry(final ActionListener listener, final UpdateRequest request, + final Exception failure, int retryCount) { + final Throwable cause = unwrapCause(failure); + if (cause instanceof VersionConflictEngineException) { + if (retryCount < request.retryOnConflict()) { + logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", + retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); + threadPool.executor(executor()).execute(new ActionRunnable(listener) { + @Override + protected void doRun() { + shardOperation(request, listener, retryCount + 1); + } + }); + return; + } + } + listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); + } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index a812cb22eb6..504a297627d 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -27,11 +27,8 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -53,8 +50,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class UpdateRequest extends InstanceShardOperationRequest implements DocWriteRequest, WriteRequest { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); private String type; private String id; @@ -560,7 +555,9 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc to use for updates when a script is not specified. 
+ * @deprecated use {@link #doc(String, XContentType)} */ + @Deprecated public UpdateRequest doc(String source) { safeDoc().source(source); return this; @@ -569,6 +566,16 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc to use for updates when a script is not specified. */ + public UpdateRequest doc(String source, XContentType xContentType) { + safeDoc().source(source, xContentType); + return this; + } + + /** + * Sets the doc to use for updates when a script is not specified. + * @deprecated use {@link #doc(byte[], XContentType)} + */ + @Deprecated public UpdateRequest doc(byte[] source) { safeDoc().source(source); return this; @@ -577,11 +584,29 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc to use for updates when a script is not specified. */ + public UpdateRequest doc(byte[] source, XContentType xContentType) { + safeDoc().source(source, xContentType); + return this; + } + + /** + * Sets the doc to use for updates when a script is not specified. + * @deprecated use {@link #doc(byte[], int, int, XContentType)} + */ + @Deprecated public UpdateRequest doc(byte[] source, int offset, int length) { safeDoc().source(source, offset, length); return this; } + /** + * Sets the doc to use for updates when a script is not specified. + */ + public UpdateRequest doc(byte[] source, int offset, int length, XContentType xContentType) { + safeDoc().source(source, offset, length, xContentType); + return this; + } + /** * Sets the doc to use for updates when a script is not specified, the doc provided * is a field and value pairs. @@ -592,10 +617,11 @@ public class UpdateRequest extends InstanceShardOperationRequest } /** - * Sets the doc to use for updates when a script is not specified. + * Sets the doc to use for updates when a script is not specified, the doc provided + * is a field and value pairs. */ - public UpdateRequest doc(String field, Object value) { - safeDoc().source(field, value); + public UpdateRequest doc(XContentType xContentType, Object... source) { + safeDoc().source(xContentType, source); return this; } @@ -645,7 +671,9 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc source of the update request to be used when the document does not exists. + * @deprecated use {@link #upsert(String, XContentType)} */ + @Deprecated public UpdateRequest upsert(String source) { safeUpsertRequest().source(source); return this; @@ -654,6 +682,16 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc source of the update request to be used when the document does not exists. */ + public UpdateRequest upsert(String source, XContentType xContentType) { + safeUpsertRequest().source(source, xContentType); + return this; + } + + /** + * Sets the doc source of the update request to be used when the document does not exists. + * @deprecated use {@link #upsert(byte[], XContentType)} + */ + @Deprecated public UpdateRequest upsert(byte[] source) { safeUpsertRequest().source(source); return this; @@ -662,11 +700,29 @@ public class UpdateRequest extends InstanceShardOperationRequest /** * Sets the doc source of the update request to be used when the document does not exists. */ + public UpdateRequest upsert(byte[] source, XContentType xContentType) { + safeUpsertRequest().source(source, xContentType); + return this; + } + + /** + * Sets the doc source of the update request to be used when the document does not exists. 
+ * @deprecated use {@link #upsert(byte[], int, int, XContentType)} + */ + @Deprecated public UpdateRequest upsert(byte[] source, int offset, int length) { safeUpsertRequest().source(source, offset, length); return this; } + /** + * Sets the doc source of the update request to be used when the document does not exists. + */ + public UpdateRequest upsert(byte[] source, int offset, int length, XContentType xContentType) { + safeUpsertRequest().source(source, offset, length, xContentType); + return this; + } + /** * Sets the doc source of the update request to be used when the document does not exists. The doc * includes field and value pairs. @@ -676,6 +732,15 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } + /** + * Sets the doc source of the update request to be used when the document does not exists. The doc + * includes field and value pairs. + */ + public UpdateRequest upsert(XContentType xContentType, Object... source) { + safeUpsertRequest().source(xContentType, source); + return this; + } + public IndexRequest upsertRequest() { return this.upsertRequest; } @@ -714,7 +779,7 @@ public class UpdateRequest extends InstanceShardOperationRequest if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if ("script".equals(currentFieldName)) { - script = Script.parse(parser, ParseFieldMatcher.EMPTY); + script = Script.parse(parser); } else if ("scripted_upsert".equals(currentFieldName)) { scriptedUpsert = parser.booleanValue(); } else if ("upsert".equals(currentFieldName)) { @@ -740,7 +805,7 @@ public class UpdateRequest extends InstanceShardOperationRequest fields(fields.toArray(new String[fields.size()])); } } else if ("_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = FetchSourceContext.fromXContent(parser); } } if (script != null) { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 50d84a24129..1a4d4077b10 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -223,7 +223,9 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder PARSER; + static { + PARSER = new ConstructingObjectParser<>(UpdateResponse.class.getName(), + args -> { + // index uuid and shard id are unknown and can't be parsed back for now. 
+ String index = (String) args[0]; + ShardId shardId = new ShardId(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), -1); + String type = (String) args[1]; + String id = (String) args[2]; + long version = (long) args[3]; + ShardInfo shardInfo = (ShardInfo) args[5]; + Long seqNo = (Long) args[6]; + + Result result = null; + for (Result r : Result.values()) { + if (r.getLowercase().equals(args[4])) { + result = r; + break; + } + } + + UpdateResponse updateResponse = null; + if (shardInfo != null && seqNo != null) { + updateResponse = new UpdateResponse(shardInfo, shardId, type, id, seqNo, version, result); + } else { + updateResponse = new UpdateResponse(shardId, type, id, version, result); + } + return updateResponse; + }); + + DocWriteResponse.declareParserFields(PARSER); + BiConsumer setGetResult = (update, get) -> + update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), update.getVersion(), + get.isExists(), get.internalSourceRef(), get.getFields())); + PARSER.declareObject(setGetResult, (parser, context) -> GetResult.fromXContentEmbedded(parser), new ParseField(GET)); + } + + public static UpdateResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 7cac8415e6e..d3766cc958c 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -30,16 +30,17 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.IfConfig; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.env.Environment; @@ -48,7 +49,7 @@ import org.elasticsearch.monitor.os.OsProbe; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.node.InternalSettingsPreparer; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -58,9 +59,7 @@ import java.net.URISyntaxException; import java.nio.file.Path; import java.security.NoSuchAlgorithmException; import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; +import java.util.Collections; import java.util.concurrent.CountDownLatch; /** @@ -209,6 +208,9 @@ final class Bootstrap { throw new BootstrapException(e); } + // Log ifconfig output before SecurityManager is installed + IfConfig.logIfNecessary(); + // install SM after natives, shutdown hooks, etc. 
try { Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings)); @@ -226,13 +228,38 @@ final class Bootstrap { }; } - private static Environment initialEnvironment(boolean foreground, Path pidFile, Map esSettings) { + private static SecureSettings loadSecureSettings(Environment initialEnv) throws BootstrapException { + final KeyStoreWrapper keystore; + try { + keystore = KeyStoreWrapper.load(initialEnv.configFile()); + } catch (IOException e) { + throw new BootstrapException(e); + } + if (keystore == null) { + return null; // no keystore + } + + try { + keystore.decrypt(new char[0] /* TODO: read password from stdin */); + } catch (Exception e) { + throw new BootstrapException(e); + } + return keystore; + } + + + private static Environment createEnvironment(boolean foreground, Path pidFile, + SecureSettings secureSettings, Settings initialSettings) { Terminal terminal = foreground ? Terminal.DEFAULT : null; Settings.Builder builder = Settings.builder(); if (pidFile != null) { builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile); } - return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings); + builder.put(initialSettings); + if (secureSettings != null) { + builder.setSecureSettings(secureSettings); + } + return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap()); } private void start() throws NodeValidationException { @@ -262,7 +289,7 @@ final class Bootstrap { final boolean foreground, final Path pidFile, final boolean quiet, - final Map esSettings) throws BootstrapException, NodeValidationException, UserException { + final Environment initialEnv) throws BootstrapException, NodeValidationException, UserException { // Set the system property before anything has a chance to trigger its use initLoggerPrefix(); @@ -272,7 +299,8 @@ final class Bootstrap { INSTANCE = new Bootstrap(); - Environment environment = initialEnvironment(foreground, pidFile, esSettings); + final SecureSettings keystore = loadSecureSettings(initialEnv); + Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings()); try { LogConfigurator.configure(environment); } catch (IOException e) { @@ -310,6 +338,14 @@ final class Bootstrap { INSTANCE.setup(true, environment); + /* TODO: close this once s3 repository doesn't try to read during repository construction + try { + // any secure settings must be read during node construction + IOUtils.close(keystore); + } catch (IOException e) { + throw new BootstrapException(e); + }*/ + INSTANCE.start(); if (closeStandardStreams) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index cb1b93aef39..231b9ac6d03 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -207,7 +207,7 @@ final class BootstrapChecks { static class OsXFileDescriptorCheck extends FileDescriptorCheck { - public OsXFileDescriptorCheck() { + OsXFileDescriptorCheck() { // see constant OPEN_MAX defined in // /usr/include/sys/syslimits.h on OS X and its use in JVM // initialization in int os:init_2(void) defined in the JVM @@ -258,7 +258,7 @@ final class BootstrapChecks { private final boolean mlockallSet; - public MlockallCheck(final boolean mlockAllSet) { + MlockallCheck(final boolean mlockAllSet) { this.mlockallSet = mlockAllSet; } @@ -360,7 +360,7 @@ final 
class BootstrapChecks { // visible for testing long getMaxMapCount(Logger logger) { final Path path = getProcSysVmMaxMapCountPath(); - try (final BufferedReader bufferedReader = getBufferedReader(path)) { + try (BufferedReader bufferedReader = getBufferedReader(path)) { final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader); if (rawProcSysVmMaxMapCount != null) { try { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java index 540a732dfae..635afaf9599 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapException.java @@ -20,14 +20,14 @@ package org.elasticsearch.bootstrap; import java.nio.file.Path; -import java.util.Map; /** * Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked * during bootstrap should explicitly declare the checked exceptions that they can throw, rather * than declaring the top-level checked exception {@link Exception}. This exception exists to wrap - * these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to - * declare all of these checked exceptions. + * these checked exceptions so that + * {@link Bootstrap#init(boolean, Path, boolean, org.elasticsearch.env.Environment)} + * does not have to declare all of these checked exceptions. */ class BootstrapException extends Exception { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index 795d4d899fb..e8538daec56 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -48,7 +48,7 @@ final class ESPolicy extends Policy { final PermissionCollection dynamic; final Map plugins; - public ESPolicy(PermissionCollection dynamic, Map plugins, boolean filterBadDefaults) { + ESPolicy(PermissionCollection dynamic, Map plugins, boolean filterBadDefaults) { this.template = Security.readPolicy(getClass().getResource(POLICY_RESOURCE), JarHell.parseClassPath()); this.untrusted = Security.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), new URL[0]); if (filterBadDefaults) { @@ -150,7 +150,7 @@ final class ESPolicy extends Policy { * @param preImplies a test that is applied to a desired permission before checking if the bad default permission that * this instance wraps implies the desired permission */ - public BadDefaultPermission(final Permission badDefaultPermission, final Predicate preImplies) { + BadDefaultPermission(final Permission badDefaultPermission, final Predicate preImplies) { super(badDefaultPermission.getName()); this.badDefaultPermission = badDefaultPermission; this.preImplies = preImplies; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 23a24d88f64..fda6e6cfdec 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -25,9 +25,11 @@ import joptsimple.OptionSpecBuilder; import joptsimple.util.PathConverter; import org.elasticsearch.Build; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.SettingCommand; +import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; import 
org.elasticsearch.cli.UserException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.NodeValidationException; @@ -40,7 +42,7 @@ import java.util.Map; /** * This class starts elasticsearch. */ -class Elasticsearch extends SettingCommand { +class Elasticsearch extends EnvironmentAwareCommand { private final OptionSpecBuilder versionOption; private final OptionSpecBuilder daemonizeOption; @@ -90,7 +92,7 @@ class Elasticsearch extends SettingCommand { } @Override - protected void execute(Terminal terminal, OptionSet options, Map settings) throws UserException { + protected void execute(Terminal terminal, OptionSet options, Environment env) throws UserException { if (options.nonOptionArguments().isEmpty() == false) { throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments()); } @@ -109,16 +111,16 @@ class Elasticsearch extends SettingCommand { final boolean quiet = options.has(quietOption); try { - init(daemonize, pidFile, quiet, settings); + init(daemonize, pidFile, quiet, env); } catch (NodeValidationException e) { throw new UserException(ExitCodes.CONFIG, e.getMessage()); } } - void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map esSettings) + void init(final boolean daemonize, final Path pidFile, final boolean quiet, Environment initialEnv) throws NodeValidationException, UserException { try { - Bootstrap.init(!daemonize, pidFile, quiet, esSettings); + Bootstrap.init(!daemonize, pidFile, quiet, initialEnv); } catch (BootstrapException | RuntimeException e) { // format exceptions to the console in a special way // to avoid 2MB stacktraces from guice, etc. diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 27d4ff69e54..d41d1879152 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -109,7 +109,7 @@ final class JNAKernel32Library { private final ConsoleCtrlHandler handler; - public NativeHandlerCallback(ConsoleCtrlHandler handler) { + NativeHandlerCallback(ConsoleCtrlHandler handler) { this.handler = handler; } @@ -155,11 +155,11 @@ final class JNAKernel32Library { public static class SizeT extends IntegerType { - public SizeT() { + SizeT() { this(0); } - public SizeT(long value) { + SizeT(long value) { super(Native.SIZE_T_SIZE, value); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index f8bc436136e..1d778fdf42a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -85,12 +85,6 @@ import java.util.Map; * cleanups to the scripting apis). But still it can provide some defense for users * that enable dynamic scripting without being fully aware of the consequences. *
- * <h1>Disabling Security</h1>
- * SecurityManager can be disabled completely with this setting:
- * <pre>
- * es.security.manager.enabled = false
- * </pre>
- *
 * <h1>Debugging Security</h1>
 * A good place to start when there is a problem is to turn on security debugging:
 * <pre>
    diff --git a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java
    index e6c5b2e6dd1..d9ca9698717 100644
    --- a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java
    +++ b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java
    @@ -154,7 +154,7 @@ final class SystemCallFilter {
             public short   len;           // number of filters
             public Pointer filter;        // filters
     
    -        public SockFProg(SockFilter filters[]) {
    +        SockFProg(SockFilter filters[]) {
                 len = (short) filters.length;
                 // serialize struct sock_filter * explicitly, its less confusing than the JNA magic we would need
                 Memory filter = new Memory(len * 8);
    diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java
    index 73687393dcd..99a9a7e43d9 100644
    --- a/core/src/main/java/org/elasticsearch/cli/Command.java
    +++ b/core/src/main/java/org/elasticsearch/cli/Command.java
    @@ -65,8 +65,8 @@ public abstract class Command implements Closeable {
                         this.close();
                     } catch (final IOException e) {
                         try (
    -                        final StringWriter sw = new StringWriter();
    -                        final PrintWriter pw = new PrintWriter(sw)) {
    +                        StringWriter sw = new StringWriter();
    +                        PrintWriter pw = new PrintWriter(sw)) {
                             e.printStackTrace(pw);
                             terminal.println(sw.toString());
                         } catch (final IOException impossible) {
    diff --git a/core/src/main/java/org/elasticsearch/cli/SettingCommand.java b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java
    similarity index 75%
    rename from core/src/main/java/org/elasticsearch/cli/SettingCommand.java
    rename to core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java
    index 17f7c9e5204..8372a6b8ab8 100644
    --- a/core/src/main/java/org/elasticsearch/cli/SettingCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java
    @@ -22,16 +22,20 @@ package org.elasticsearch.cli;
     import joptsimple.OptionSet;
     import joptsimple.OptionSpec;
     import joptsimple.util.KeyValuePair;
    +import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.env.Environment;
    +import org.elasticsearch.node.InternalSettingsPreparer;
     
     import java.util.HashMap;
     import java.util.Locale;
     import java.util.Map;
     
    -public abstract class SettingCommand extends Command {
    +/** A cli command which requires an {@link org.elasticsearch.env.Environment} to use current paths and settings. */
    +public abstract class EnvironmentAwareCommand extends Command {
     
    private final OptionSpec<KeyValuePair> settingOption;
     
    -    public SettingCommand(String description) {
    +    public EnvironmentAwareCommand(String description) {
             super(description);
             this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
         }
    @@ -51,9 +55,15 @@ public abstract class SettingCommand extends Command {
             putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
             putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
     
    -        execute(terminal, options, settings);
    +        execute(terminal, options, createEnv(terminal, settings));
         }
     
    +    /** Create an {@link Environment} for the command to use. Overrideable for tests. */
+    protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
    +        return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
    +    }
    +
    +    /** Ensure the given setting exists, reading it from system properties if not already set. */
    protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
             final String value = System.getProperty(key);
             if (value != null) {
    @@ -72,6 +82,7 @@ public abstract class SettingCommand extends Command {
             }
         }
     
-    protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;
    +    /** Execute the command with the initialized {@link Environment}. */
    +    protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception;
     
     }
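
A minimal sketch of what a command looks like on top of the renamed base class (the command itself is hypothetical, not part of this change): subclasses now receive a fully prepared Environment instead of the raw settings map that SettingCommand handed them, so path resolution has already happened by the time execute() runs.

import joptsimple.OptionSet;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.env.Environment;

import java.util.Arrays;

class ShowPathsCommand extends EnvironmentAwareCommand {

    ShowPathsCommand() {
        super("Prints the config and data paths of this installation");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        // createEnv() already ran InternalSettingsPreparer, so -E overrides and
        // es.path.* system properties are resolved into the Environment.
        terminal.println("config: " + env.configFile());
        terminal.println("data: " + Arrays.toString(env.dataFiles()));
    }
}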
    diff --git a/core/src/main/java/org/elasticsearch/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java
    index 58eb5012d07..cd7fc76e681 100644
    --- a/core/src/main/java/org/elasticsearch/cli/Terminal.java
    +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java
    @@ -27,6 +27,7 @@ import java.io.IOException;
     import java.io.InputStreamReader;
     import java.io.PrintWriter;
     import java.nio.charset.Charset;
    +import java.util.Locale;
     
     /**
      * A Terminal wraps access to reading input and writing output for a cli.
    @@ -92,6 +93,26 @@ public abstract class Terminal {
             }
         }
     
    +    /**
    +     * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n'
    +     * (or the default empty value) is entered.
    +     */
    +    public final boolean promptYesNo(String prompt, boolean defaultYes) {
    +        String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]";
    +        while (true) {
    +            String answer = readText(prompt + answerPrompt).toLowerCase(Locale.ROOT);
    +            if (answer.isEmpty()) {
    +                return defaultYes;
    +            }
    +            boolean answerYes = answer.equals("y");
    +            if (answerYes == false && answer.equals("n") == false) {
    +                println("Did not understand answer '" + answer + "'");
    +                continue;
    +            }
    +            return answerYes;
    +        }
    +    }
    +
         private static class ConsoleTerminal extends Terminal {
     
             private static final Console CONSOLE = System.console();
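
The new prompt helper is self-contained enough that a usage sketch is short (the prompt text below is made up):

import org.elasticsearch.cli.Terminal;

class PromptDemo {
    public static void main(String[] args) {
        Terminal terminal = Terminal.DEFAULT;
        // Re-prompts until 'y', 'n', or an empty answer; empty picks the default ("no" here).
        boolean overwrite = terminal.promptYesNo("Overwrite the existing keystore?", false);
        terminal.println(overwrite ? "overwriting" : "keeping existing file");
    }
}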
    diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
    index 14abc77513a..3f705a215ea 100644
    --- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
    +++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
    @@ -112,6 +112,7 @@ import org.elasticsearch.action.ingest.SimulatePipelineResponse;
     import org.elasticsearch.action.ingest.WritePipelineResponse;
     import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.bytes.BytesReference;
    +import org.elasticsearch.common.xcontent.XContentType;
     import org.elasticsearch.tasks.TaskId;
     
     /**
    @@ -545,9 +546,16 @@ public interface ClusterAdminClient extends ElasticsearchClient {
     
         /**
          * Stores an ingest pipeline
    +     * @deprecated use {@link #preparePutPipeline(String, BytesReference, XContentType)}
          */
    +    @Deprecated
         PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source);
     
    +    /**
    +     * Stores an ingest pipeline
    +     */
    +    PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, XContentType xContentType);
    +
         /**
          * Deletes a stored ingest pipeline
          */
    @@ -596,8 +604,14 @@ public interface ClusterAdminClient extends ElasticsearchClient {
         /**
          * Simulates an ingest pipeline
          */
    +    @Deprecated
         SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source);
     
    +    /**
    +     * Simulates an ingest pipeline
    +     */
    +    SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType);
    +
         /**
          * Explain the allocation of a shard
          */
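
A caller-side sketch of the new overload (the pipeline id and body are illustrative only): the XContentType now travels with the source bytes instead of being sniffed from them on the server.

import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

class PutPipelineDemo {
    static void putEmptyPipeline(Client client) {
        BytesArray source = new BytesArray("{\"description\":\"demo\",\"processors\":[]}");
        // Deprecated form: preparePutPipeline("demo", source)
        client.admin().cluster()
                .preparePutPipeline("demo", source, XContentType.JSON)
                .get();
    }
}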
    diff --git a/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java b/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
    index 84438ff6d1a..c6aa4991a82 100644
    --- a/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
    +++ b/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
    @@ -41,7 +41,7 @@ public interface ElasticsearchClient {
          * @return A future allowing to get back the response.
          */
     <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
-            final Action<Request, Response, RequestBuilder> action, final Request request);
+            Action<Request, Response, RequestBuilder> action, Request request);
     
         /**
          * Executes a generic action, denoted by an {@link Action}.
    @@ -54,7 +54,7 @@ public interface ElasticsearchClient {
      * @param <RequestBuilder> The request builder type.
          */
     <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
-            final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener);
     
         /**
          * Prepares a request builder to execute, specified by {@link Action}.
    @@ -66,7 +66,7 @@ public interface ElasticsearchClient {
          * @return The request builder, that can, at a later stage, execute the request.
          */
     <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
-            final Action<Request, Response, RequestBuilder> action);
+            Action<Request, Response, RequestBuilder> action);
     
         /**
          * Returns the threadpool used to execute requests on this client
    diff --git a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
    index 6c3aa071ba3..e4f26b15702 100644
    --- a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
    +++ b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
    @@ -28,12 +28,14 @@ import org.elasticsearch.action.GenericAction;
     import org.elasticsearch.action.support.TransportAction;
     import org.elasticsearch.client.Client;
     import org.elasticsearch.client.support.AbstractClient;
    +import org.elasticsearch.cluster.node.DiscoveryNode;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.tasks.Task;
     import org.elasticsearch.tasks.TaskListener;
     import org.elasticsearch.threadpool.ThreadPool;
     
     import java.util.Map;
    +import java.util.function.Supplier;
     
     /**
      * Client that executes actions on the local node.
    @@ -41,13 +43,19 @@ import java.util.Map;
     public class NodeClient extends AbstractClient {
     
    private Map<GenericAction, TransportAction> actions;
    +    /**
    +     * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by
    +     * {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}.
    +     */
+    private Supplier<String> localNodeId;
     
         public NodeClient(Settings settings, ThreadPool threadPool) {
             super(settings, threadPool);
         }
     
-    public void initialize(Map<GenericAction, TransportAction> actions) {
+    public void initialize(Map<GenericAction, TransportAction> actions, Supplier<String> localNodeId) {
             this.actions = actions;
    +        this.localNodeId = localNodeId;
         }
     
         @Override
    @@ -85,6 +93,14 @@ public class NodeClient extends AbstractClient {
             return transportAction(action).execute(request, listener);
         }
     
    +    /**
    +     * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by
    +     * {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}.
    +     */
    +    public String getLocalNodeId() {
    +        return localNodeId.get();
    +    }
    +
         /**
          * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available.
          */
    diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
    index 075fbf1fad6..e0ebcfe70a9 100644
    --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
    +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
    @@ -343,6 +343,7 @@ import org.elasticsearch.common.bytes.BytesReference;
     import org.elasticsearch.common.component.AbstractComponent;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.util.concurrent.ThreadContext;
    +import org.elasticsearch.common.xcontent.XContentType;
     import org.elasticsearch.tasks.TaskId;
     import org.elasticsearch.threadpool.ThreadPool;
     
    @@ -400,7 +401,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
             doExecute(action, request, listener);
         }
     
-    protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+    protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener);
     
         @Override
         public ActionFuture index(final IndexRequest request) {
    @@ -671,7 +672,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
             private final ClusterAdmin clusterAdmin;
             private final IndicesAdmin indicesAdmin;
     
    -        public Admin(ElasticsearchClient client) {
    +        Admin(ElasticsearchClient client) {
                 this.clusterAdmin = new ClusterAdmin(client);
                 this.indicesAdmin = new IndicesAdmin(client);
             }
    @@ -691,7 +692,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
     
             private final ElasticsearchClient client;
     
    -        public ClusterAdmin(ElasticsearchClient client) {
    +        ClusterAdmin(ElasticsearchClient client) {
                 this.client = client;
             }
     
    @@ -1081,6 +1082,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client
                 return new PutPipelineRequestBuilder(this, PutPipelineAction.INSTANCE, id, source);
             }
     
    +        @Override
    +        public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, XContentType xContentType) {
    +            return new PutPipelineRequestBuilder(this, PutPipelineAction.INSTANCE, id, source, xContentType);
    +        }
    +
             @Override
             public void deletePipeline(DeletePipelineRequest request, ActionListener listener) {
                 execute(DeletePipelineAction.INSTANCE, request, listener);
    @@ -1131,6 +1137,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client
                 return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source);
             }
     
    +        @Override
    +        public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType) {
    +            return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source, xContentType);
    +        }
    +
             @Override
             public void allocationExplain(ClusterAllocationExplainRequest request, ActionListener listener) {
                 execute(ClusterAllocationExplainAction.INSTANCE, request, listener);
    @@ -1199,7 +1210,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
     
             @Override
             public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript(@Nullable String scriptLang, String id){
    -            return prepareDeleteStoredScript().setScriptLang(scriptLang).setId(id);
    +            return prepareDeleteStoredScript().setLang(scriptLang).setId(id);
             }
         }
     
    @@ -1207,7 +1218,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
     
             private final ElasticsearchClient client;
     
    -        public IndicesAdmin(ElasticsearchClient client) {
    +        IndicesAdmin(ElasticsearchClient client) {
                 this.client = client;
             }
     
    diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
    index 4b9e4b1eac6..01fdf98ceb4 100644
    --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
    +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
    @@ -27,7 +27,9 @@ import org.elasticsearch.action.ActionRequest;
     import org.elasticsearch.action.ActionRequestBuilder;
     import org.elasticsearch.action.ActionResponse;
     import org.elasticsearch.client.support.AbstractClient;
    +import org.elasticsearch.cluster.ClusterModule;
     import org.elasticsearch.cluster.node.DiscoveryNode;
    +import org.elasticsearch.common.UUIDs;
     import org.elasticsearch.common.component.LifecycleComponent;
     import org.elasticsearch.common.inject.Injector;
     import org.elasticsearch.common.inject.Module;
    @@ -41,9 +43,10 @@ import org.elasticsearch.common.settings.SettingsModule;
     import org.elasticsearch.common.transport.TransportAddress;
     import org.elasticsearch.common.unit.TimeValue;
     import org.elasticsearch.common.util.BigArrays;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.indices.breaker.CircuitBreakerService;
     import org.elasticsearch.node.Node;
    -import org.elasticsearch.node.internal.InternalSettingsPreparer;
    +import org.elasticsearch.node.InternalSettingsPreparer;
     import org.elasticsearch.plugins.ActionPlugin;
     import org.elasticsearch.plugins.NetworkPlugin;
     import org.elasticsearch.plugins.Plugin;
    @@ -63,8 +66,11 @@ import java.util.Collection;
     import java.util.Collections;
     import java.util.List;
     import java.util.concurrent.TimeUnit;
    +import java.util.function.Function;
     import java.util.stream.Collectors;
    +import java.util.stream.Stream;
     
    +import static java.util.stream.Collectors.toList;
     import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
     
     /**
    @@ -136,10 +142,16 @@ public abstract class TransportClient extends AbstractClient {
            List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
                 entries.addAll(NetworkModule.getNamedWriteables());
                 entries.addAll(searchModule.getNamedWriteables());
    +            entries.addAll(ClusterModule.getNamedWriteables());
                 entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
                                              .flatMap(p -> p.getNamedWriteables().stream())
                                              .collect(Collectors.toList()));
                 NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
    +            NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of(
    +                    searchModule.getNamedXContents().stream(),
    +                    pluginsService.filterPlugins(Plugin.class).stream()
    +                            .flatMap(p -> p.getNamedXContent().stream())
    +                    ).flatMap(Function.identity()).collect(toList()));
     
                 ModulesBuilder modules = new ModulesBuilder();
                 // plugin modules must be added here, before others or we can get crazy injection errors...
    @@ -147,8 +159,9 @@ public abstract class TransportClient extends AbstractClient {
                     modules.add(pluginModule);
                 }
                 modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
    -            ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getClusterSettings(),
    -                threadPool, pluginsService.filterPlugins(ActionPlugin.class));
    +            ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getIndexScopedSettings(),
    +                    settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool,
    +                    pluginsService.filterPlugins(ActionPlugin.class), null, null);
                 modules.add(actionModule);
     
                 CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
    @@ -158,10 +171,12 @@ public abstract class TransportClient extends AbstractClient {
                 resourcesToClose.add(bigArrays);
                 modules.add(settingsModule);
                 NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
    -                bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
    +                bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
                 final Transport transport = networkModule.getTransportSupplier().get();
                 final TransportService transportService = new TransportService(settings, transport, threadPool,
    -                networkModule.getTransportInterceptor(), null);
    +                networkModule.getTransportInterceptor(),
    +                boundTransportAddress -> DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 0),
    +                    UUIDs.randomBase64UUID()), null);
                 modules.add((b -> {
                     b.bind(BigArrays.class).toInstance(bigArrays);
                     b.bind(PluginsService.class).toInstance(pluginsService);
    diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
    index 350a35b6e49..ea2906dab67 100644
    --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
    +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
    @@ -101,6 +101,21 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
     
         private final TransportClient.HostFailureListener hostFailureListener;
     
    +    // TODO: migrate this to use low level connections and single type channels
+    /** {@link ConnectionProfile} to use when connecting to the listed nodes and doing a liveness check */
    +    private static final ConnectionProfile LISTED_NODES_PROFILE;
    +
    +    static {
    +        ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
    +        builder.addConnections(1,
    +            TransportRequestOptions.Type.BULK,
    +            TransportRequestOptions.Type.PING,
    +            TransportRequestOptions.Type.RECOVERY,
    +            TransportRequestOptions.Type.REG,
    +            TransportRequestOptions.Type.STATE);
    +        LISTED_NODES_PROFILE = builder.build();
    +    }
    +
         TransportClientNodesService(Settings settings, TransportService transportService,
                                            ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) {
             super(settings);
    @@ -249,7 +264,7 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
     
             private volatile int i;
     
-        public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener,
+        RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener,
                              List<DiscoveryNode> nodes, int index, TransportClient.HostFailureListener hostFailureListener) {
                                  List nodes, int index, TransportClient.HostFailureListener hostFailureListener) {
                 this.callback = callback;
                 this.listener = listener;
    @@ -389,8 +404,8 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
                     if (!transportService.nodeConnected(listedNode)) {
                         try {
                             // its a listed node, light connect to it...
    -                        logger.trace("connecting to listed node (light) [{}]", listedNode);
    -                        transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);
    +                        logger.trace("connecting to listed node [{}]", listedNode);
    +                        transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);
                         } catch (Exception e) {
                             logger.info(
                                 (Supplier)
    @@ -470,7 +485,7 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
                                         } else {
                                             // its a listed node, light connect to it...
                                             logger.trace("connecting to listed node (light) [{}]", listedNode);
    -                                        transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);
    +                                        transportService.connectToNode(listedNode, LISTED_NODES_PROFILE);
                                         }
                                     } catch (Exception e) {
                                         logger.debug(
    diff --git a/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java
    index f9d5f33cad6..8e63bc2b9d7 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java
@@ -40,12 +40,7 @@ public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffabl
             }
         }
     
    -    @Override
-    public Diff<T> readDiffFrom(StreamInput in) throws IOException {
-        return new CompleteDiff<>(this, in);
-    }
-
-    public static <T extends Diffable<T>> Diff<T> readDiffFrom(T reader, StreamInput in) throws IOException {
+    public static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, StreamInput in) throws IOException {
         return new CompleteDiff<>(reader, in);
         }
     
@@ -57,23 +52,23 @@ public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffabl
             /**
              * Creates simple diff with changes
              */
    -        public CompleteDiff(T part) {
    +        CompleteDiff(T part) {
                 this.part = part;
             }
     
             /**
              * Creates simple diff without changes
              */
    -        public CompleteDiff() {
    +        CompleteDiff() {
                 this.part = null;
             }
     
             /**
              * Read simple diff from the stream
              */
-        public CompleteDiff(Diffable<T> reader, StreamInput in) throws IOException {
+        CompleteDiff(Reader<T> reader, StreamInput in) throws IOException {
                 if (in.readBoolean()) {
    -                this.part = reader.readFrom(in);
    +                this.part = reader.read(in);
                 } else {
                     this.part = null;
                 }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java
    new file mode 100644
    index 00000000000..fb253a1a5df
    --- /dev/null
    +++ b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java
    @@ -0,0 +1,133 @@
    +/*
    + * Licensed to Elasticsearch under one or more contributor
    + * license agreements. See the NOTICE file distributed with
    + * this work for additional information regarding copyright
    + * ownership. Elasticsearch licenses this file to you under
    + * the Apache License, Version 2.0 (the "License"); you may
    + * not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +
    +package org.elasticsearch.cluster;
    +
    +import org.elasticsearch.Version;
    +import org.elasticsearch.common.Nullable;
    +import org.elasticsearch.common.io.stream.NamedWriteable;
    +import org.elasticsearch.common.io.stream.StreamInput;
    +import org.elasticsearch.common.io.stream.StreamOutput;
    +
    +import java.io.IOException;
    +
    +/**
+ * Abstract diffable object with a simple diff implementation that sends the entire object if it has changed, or
+ * nothing if the object remained the same. Compared to AbstractDiffable, this class also works with NamedWriteables
    + */
+public abstract class AbstractNamedDiffable<T extends NamedDiffable<T>> implements Diffable<T>, NamedWriteable {
    +
    +    @Override
+    public Diff<T> diff(T previousState) {
    +        if (this.get().equals(previousState)) {
    +            return new CompleteNamedDiff<>(previousState.getWriteableName(), previousState.getMinimalSupportedVersion());
    +        } else {
    +            return new CompleteNamedDiff<>(get());
    +        }
    +    }
    +
+    public static <T extends NamedDiffable<T>> NamedDiff<T> readDiffFrom(Class<? extends T> tClass, String name, StreamInput in)
    +        throws IOException {
    +        return new CompleteNamedDiff<>(tClass, name, in);
    +    }
    +
    +    private static class CompleteNamedDiff<T extends NamedDiffable<T>> implements NamedDiff<T> {
    +
    +        @Nullable
    +        private final T part;
    +
    +        private final String name;
    +
    +        /**
    +         * A non-null value is only required for write operations; if the diff was just read from the stream, the version
    +         * is unnecessary.
    +         */
    +        @Nullable
    +        private final Version minimalSupportedVersion;
    +
    +        /**
    +         * Creates simple diff with changes
    +         */
    +        CompleteNamedDiff(T part) {
    +            this.part = part;
    +            this.name = part.getWriteableName();
    +            this.minimalSupportedVersion = part.getMinimalSupportedVersion();
    +        }
    +
    +        /**
    +         * Creates simple diff without changes
    +         */
    +        CompleteNamedDiff(String name, Version minimalSupportedVersion) {
    +            this.part = null;
    +            this.name = name;
    +            this.minimalSupportedVersion = minimalSupportedVersion;
    +        }
    +
    +        /**
    +         * Read simple diff from the stream
    +         */
    +        CompleteNamedDiff(Class<? extends T> tClass, String name, StreamInput in) throws IOException {
    +            if (in.readBoolean()) {
    +                this.part = in.readNamedWriteable(tClass, name);
    +                this.minimalSupportedVersion = part.getMinimalSupportedVersion();
    +            } else {
    +                this.part = null;
    +                this.minimalSupportedVersion = null; // We just read this diff, so it's not going to be written
    +            }
    +            this.name = name;
    +        }
    +
    +        @Override
    +        public void writeTo(StreamOutput out) throws IOException {
    +            assert minimalSupportedVersion != null : "shouldn't be called on diff that was de-serialized from the stream";
    +            if (part != null) {
    +                out.writeBoolean(true);
    +                part.writeTo(out);
    +            } else {
    +                out.writeBoolean(false);
    +            }
    +        }
    +
    +        @Override
    +        public T apply(T part) {
    +            if (this.part != null) {
    +                return this.part;
    +            } else {
    +                return part;
    +            }
    +        }
    +
    +        @Override
    +        public String getWriteableName() {
    +            return name;
    +        }
    +
    +        @Override
    +        public Version getMinimalSupportedVersion() {
    +            assert minimalSupportedVersion != null : "shouldn't be called on the diff that was de-serialized from the stream";
    +            return minimalSupportedVersion;
    +        }
    +    }
    +
    +    @SuppressWarnings("unchecked")
    +    public T get() {
    +        return (T) this;
    +    }
    +
    +}
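
Reviewer note: a minimal sketch of how a custom would build on the new base class. `ExampleCustom` and its `value` field are hypothetical and only illustrate the pattern; equals/hashCode (which diff() relies on to detect changes) are omitted for brevity.

---------------------------------------------------------------------------
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

// Hypothetical custom cluster state part; not part of this change.
public class ExampleCustom extends AbstractNamedDiffable<ClusterState.Custom> implements ClusterState.Custom {
    public static final String TYPE = "example";

    private final String value;

    public ExampleCustom(String value) {
        this.value = value;
    }

    // reads the full object off the wire
    public ExampleCustom(StreamInput in) throws IOException {
        this.value = in.readString();
    }

    // diff reading is delegated to the generic helper in AbstractNamedDiffable
    public static NamedDiff<ClusterState.Custom> readDiffFrom(StreamInput in) throws IOException {
        return readDiffFrom(ClusterState.Custom.class, TYPE, in);
    }

    @Override
    public String getWriteableName() {
        return TYPE;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.field("value", value);
    }
}
---------------------------------------------------------------------------
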
    diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
    index 930991c443b..8fe8942662a 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
    @@ -22,7 +22,9 @@ package org.elasticsearch.cluster;
     import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
     import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
     import org.elasticsearch.cluster.action.shard.ShardStateAction;
    +import org.elasticsearch.cluster.metadata.IndexGraveyard;
     import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
    +import org.elasticsearch.cluster.metadata.MetaData;
     import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
     import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
     import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
    @@ -30,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
     import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
     import org.elasticsearch.cluster.metadata.MetaDataMappingService;
     import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
    +import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
     import org.elasticsearch.cluster.routing.DelayedAllocationService;
     import org.elasticsearch.cluster.routing.RoutingService;
     import org.elasticsearch.cluster.routing.allocation.AllocationService;
    @@ -52,15 +55,25 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
     import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
     import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
     import org.elasticsearch.cluster.service.ClusterService;
    +import org.elasticsearch.common.ParseField;
     import org.elasticsearch.common.inject.AbstractModule;
    +import org.elasticsearch.common.io.stream.NamedWriteable;
    +import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
    +import org.elasticsearch.common.io.stream.Writeable;
    +import org.elasticsearch.common.io.stream.Writeable.Reader;
     import org.elasticsearch.common.settings.ClusterSettings;
     import org.elasticsearch.common.settings.Setting;
     import org.elasticsearch.common.settings.Setting.Property;
     import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.gateway.GatewayAllocator;
    +import org.elasticsearch.ingest.IngestMetadata;
     import org.elasticsearch.plugins.ClusterPlugin;
    +import org.elasticsearch.script.ScriptMetaData;
     import org.elasticsearch.tasks.TaskResultsService;
     
    +import java.util.ArrayList;
     import java.util.Collection;
     import java.util.HashMap;
     import java.util.LinkedHashMap;
    @@ -94,6 +107,52 @@ public class ClusterModule extends AbstractModule {
             indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
         }
     
    +
    +    public static List<Entry> getNamedWriteables() {
    +        List<Entry> entries = new ArrayList<>();
    +        // Cluster State
    +        registerClusterCustom(entries, SnapshotsInProgress.TYPE, SnapshotsInProgress::new, SnapshotsInProgress::readDiffFrom);
    +        registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom);
    +        registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new,
    +            SnapshotDeletionsInProgress::readDiffFrom);
    +        // Metadata
    +        registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom);
    +        registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom);
    +        registerMetaDataCustom(entries, ScriptMetaData.TYPE, ScriptMetaData::new, ScriptMetaData::readDiffFrom);
    +        registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom);
    +        return entries;
    +    }
    +
    +    public static List<NamedXContentRegistry.Entry> getNamedXWriteables() {
    +        List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
    +        // Metadata
    +        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(RepositoriesMetaData.TYPE),
    +            RepositoriesMetaData::fromXContent));
    +        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IngestMetadata.TYPE),
    +            IngestMetadata::fromXContent));
    +        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(ScriptMetaData.TYPE),
    +            ScriptMetaData::fromXContent));
    +        entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexGraveyard.TYPE),
    +            IndexGraveyard::fromXContent));
    +        return entries;
    +    }
    +
    +    private static <T extends ClusterState.Custom> void registerClusterCustom(List<Entry> entries, String name, Reader<? extends T> reader,
    +                                                                              Reader<NamedDiff> diffReader) {
    +        registerCustom(entries, ClusterState.Custom.class, name, reader, diffReader);
    +    }
    +
    +    private static <T extends MetaData.Custom> void registerMetaDataCustom(List<Entry> entries, String name, Reader<? extends T> reader,
    +                                                                           Reader<NamedDiff> diffReader) {
    +        registerCustom(entries, MetaData.Custom.class, name, reader, diffReader);
    +    }
    +
    +    private static <T extends NamedWriteable> void registerCustom(List<Entry> entries, Class<T> category, String name,
    +                                                                  Reader<? extends T> reader, Reader<NamedDiff> diffReader) {
    +        entries.add(new Entry(category, name, reader));
    +        entries.add(new Entry(NamedDiff.class, name, diffReader));
    +    }
    +
         public IndexNameExpressionResolver getIndexNameExpressionResolver() {
             return indexNameExpressionResolver;
         }
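
Reviewer note: a sketch of how these registry entries are consumed, reusing the hypothetical `ExampleCustom` from above. Plugins can contribute equivalent entries through their own named writeable hooks.

---------------------------------------------------------------------------
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedWriteables());
// one entry for the full object, one for its named diff
entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, ExampleCustom.TYPE, ExampleCustom::new));
entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, ExampleCustom.TYPE, ExampleCustom::readDiffFrom));
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
---------------------------------------------------------------------------
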
    diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
    index 7b6f2b55aa9..c9ef935224c 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
    @@ -22,6 +22,7 @@ package org.elasticsearch.cluster;
     import com.carrotsearch.hppc.cursors.IntObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
    +import org.elasticsearch.Version;
     import org.elasticsearch.cluster.block.ClusterBlock;
     import org.elasticsearch.cluster.block.ClusterBlocks;
     import org.elasticsearch.cluster.metadata.IndexMetaData;
    @@ -37,20 +38,21 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
     import org.elasticsearch.cluster.routing.RoutingTable;
     import org.elasticsearch.cluster.routing.ShardRouting;
     import org.elasticsearch.cluster.service.ClusterService;
    -import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.Strings;
     import org.elasticsearch.common.UUIDs;
    +import org.elasticsearch.common.bytes.BytesArray;
     import org.elasticsearch.common.bytes.BytesReference;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.compress.CompressedXContent;
     import org.elasticsearch.common.io.stream.BytesStreamOutput;
    +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
    +import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.xcontent.ToXContent;
     import org.elasticsearch.common.xcontent.XContentBuilder;
    -import org.elasticsearch.common.xcontent.XContentFactory;
    -import org.elasticsearch.common.xcontent.XContentParser;
    +import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.discovery.Discovery;
     import org.elasticsearch.discovery.zen.PublishClusterStateAction;
     
    @@ -86,36 +88,12 @@ import java.util.Set;
      */
     public class ClusterState implements ToXContent, Diffable<ClusterState> {
     
    -    public static final ClusterState PROTO = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
    +    public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
     
    -    public interface Custom extends Diffable<Custom>, ToXContent {
    -
    -        String type();
    +    public interface Custom extends NamedDiffable<Custom>, ToXContent {
         }
     
    -    private static final Map<String, Custom> customPrototypes = new HashMap<>();
    -
    -    /**
    -     * Register a custom index meta data factory. Make sure to call it from a static block.
    -     */
    -    public static void registerPrototype(String type, Custom proto) {
    -        customPrototypes.put(type, proto);
    -    }
    -
    -    static {
    -        // register non plugin custom parts
    -        registerPrototype(SnapshotsInProgress.TYPE, SnapshotsInProgress.PROTO);
    -        registerPrototype(RestoreInProgress.TYPE, RestoreInProgress.PROTO);
    -    }
    -
    -    public static <T extends Custom> T lookupPrototype(String type) {
    -        @SuppressWarnings("unchecked")
    -        T proto = (T) customPrototypes.get(type);
    -        if (proto == null) {
    -            throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
    -        }
    -        return proto;
    -    }
    +    private static final NamedDiffableValueSerializer<Custom> CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class);
     
         public static final String UNKNOWN_UUID = "_na_";
     
    @@ -403,11 +381,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     
                     builder.startObject("mappings");
                     for (ObjectObjectCursor<String, CompressedXContent> cursor1 : templateMetaData.mappings()) {
    -                    byte[] mappingSource = cursor1.value.uncompressed();
    -                    Map<String, Object> mapping;
    -                    try (XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource)) {
    -                        mapping = parser.map();
    -                    }
    +                    Map<String, Object> mapping = XContentHelper.convertToMap(new BytesArray(cursor1.value.uncompressed()), false).v2();
                         if (mapping.size() == 1 && mapping.containsKey(cursor1.key)) {
                             // the type name is the root value, reduce it
                             mapping = (Map<String, Object>) mapping.get(cursor1.key);
    @@ -435,11 +409,8 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     
                     builder.startObject("mappings");
                     for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
    -                    byte[] mappingSource = cursor.value.source().uncompressed();
    -                    Map<String, Object> mapping;
    -                    try (XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource)) {
    -                        mapping = parser.map();
    -                    }
    +                    Map<String, Object> mapping = XContentHelper
    +                            .convertToMap(new BytesArray(cursor.value.source().uncompressed()), false).v2();
                         if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
                             // the type name is the root value, reduce it
                             mapping = (Map<String, Object>) mapping.get(cursor.key);
    @@ -665,53 +636,39 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
              * @param data      input bytes
              * @param localNode used to set the local node in the cluster state.
              */
    -        public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
    -            return readFrom(StreamInput.wrap(data), localNode);
    +        public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode, NamedWriteableRegistry registry) throws IOException {
    +            StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), registry);
    +            return readFrom(in, localNode);
     
             }
    -
    -        /**
    -         * @param in        input stream
    -         * @param localNode used to set the local node in the cluster state. can be null.
    -         */
    -        public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
    -            return PROTO.readFrom(in, localNode);
    -        }
         }
     
         @Override
    -    public Diff diff(ClusterState previousState) {
    +    public Diff<ClusterState> diff(ClusterState previousState) {
             return new ClusterStateDiff(previousState, this);
         }
     
    -    @Override
    -    public Diff readDiffFrom(StreamInput in) throws IOException {
    -        return new ClusterStateDiff(in, this);
    +    public static Diff<ClusterState> readDiffFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
    +        return new ClusterStateDiff(in, localNode);
         }
     
    -    public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
    +    public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
             ClusterName clusterName = new ClusterName(in);
             Builder builder = new Builder(clusterName);
             builder.version = in.readLong();
             builder.uuid = in.readString();
    -        builder.metaData = MetaData.Builder.readFrom(in);
    -        builder.routingTable = RoutingTable.Builder.readFrom(in);
    -        builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
    -        builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
    +        builder.metaData = MetaData.readFrom(in);
    +        builder.routingTable = RoutingTable.readFrom(in);
    +        builder.nodes = DiscoveryNodes.readFrom(in, localNode);
    +        builder.blocks = new ClusterBlocks(in);
             int customSize = in.readVInt();
             for (int i = 0; i < customSize; i++) {
    -            String type = in.readString();
    -            Custom customIndexMetaData = lookupPrototype(type).readFrom(in);
    -            builder.putCustom(type, customIndexMetaData);
    +            Custom customIndexMetaData = in.readNamedWriteable(Custom.class);
    +            builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData);
             }
             return builder.build();
         }
     
    -    @Override
    -    public ClusterState readFrom(StreamInput in) throws IOException {
    -        return readFrom(in, nodes.getLocalNode());
    -    }
    -
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             clusterName.writeTo(out);
    @@ -721,10 +678,18 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
             routingTable.writeTo(out);
             nodes.writeTo(out);
             blocks.writeTo(out);
    -        out.writeVInt(customs.size());
    -        for (ObjectObjectCursor<String, Custom> cursor : customs) {
    -            out.writeString(cursor.key);
    -            cursor.value.writeTo(out);
    +        // filter out custom states not supported by the other node
    +        int numberOfCustoms = 0;
    +        for (ObjectCursor<Custom> cursor : customs.values()) {
    +            if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
    +                numberOfCustoms++;
    +            }
    +        }
    +        out.writeVInt(numberOfCustoms);
    +        for (ObjectCursor<Custom> cursor : customs.values()) {
    +            if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
    +                out.writeNamedWriteable(cursor.value);
    +            }
             }
         }
     
    @@ -748,7 +713,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     
             private final Diff<ImmutableOpenMap<String, Custom>> customs;
     
    -        public ClusterStateDiff(ClusterState before, ClusterState after) {
    +        ClusterStateDiff(ClusterState before, ClusterState after) {
                 fromUuid = before.stateUUID;
                 toUuid = after.stateUUID;
                 toVersion = after.version;
    @@ -757,30 +722,19 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
                 nodes = after.nodes.diff(before.nodes);
                 metaData = after.metaData.diff(before.metaData);
                 blocks = after.blocks.diff(before.blocks);
    -            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
    +            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
             }
     
    -        public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
    +        ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException {
                 clusterName = new ClusterName(in);
                 fromUuid = in.readString();
                 toUuid = in.readString();
                 toVersion = in.readLong();
    -            routingTable = proto.routingTable.readDiffFrom(in);
    -            nodes = proto.nodes.readDiffFrom(in);
    -            metaData = proto.metaData.readDiffFrom(in);
    -            blocks = proto.blocks.readDiffFrom(in);
    -            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
    -                new DiffableUtils.DiffableValueSerializer<String, Custom>() {
    -                    @Override
    -                    public Custom read(StreamInput in, String key) throws IOException {
    -                        return lookupPrototype(key).readFrom(in);
    -                    }
    -
    -                    @Override
    -                    public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
    -                        return lookupPrototype(key).readDiffFrom(in);
    -                    }
    -                });
    +            routingTable = RoutingTable.readDiffFrom(in);
    +            nodes = DiscoveryNodes.readDiffFrom(in, localNode);
    +            metaData = MetaData.readDiffFrom(in);
    +            blocks = ClusterBlocks.readDiffFrom(in);
    +            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
             }
     
             @Override
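
Reviewer note: since customs are now read through the named writeable mechanism, deserialization needs a registry-aware stream. A round-trip sketch using the signatures introduced here; `clusterState` and `localNode` are assumed in scope.

---------------------------------------------------------------------------
byte[] data = ClusterState.Builder.toBytes(clusterState);
NamedWriteableRegistry registry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
// fromBytes wraps the input in a NamedWriteableAwareStreamInput before calling readFrom
ClusterState restored = ClusterState.Builder.fromBytes(data, localNode, registry);
---------------------------------------------------------------------------
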
    diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
    index 6ca815b2fab..96177f7b7d8 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java
    @@ -26,8 +26,10 @@ import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.unit.TimeValue;
     import org.elasticsearch.common.util.concurrent.ThreadContext;
     
    +import java.util.Objects;
     import java.util.concurrent.atomic.AtomicReference;
     import java.util.function.Predicate;
    +import java.util.function.Supplier;
     
     /**
      * A utility class which simplifies interacting with the cluster state in cases where
    @@ -45,7 +47,7 @@ public class ClusterStateObserver {
         volatile TimeValue timeOutValue;
     
     
    -    final AtomicReference<ClusterState> lastObservedState;
    +    final AtomicReference<StoredState> lastObservedState;
         final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener();
         // observingContext is not null when waiting on cluster state changes
         final AtomicReference<ObservingContext> observingContext = new AtomicReference<>(null);
    @@ -73,7 +75,7 @@ public class ClusterStateObserver {
         public ClusterStateObserver(ClusterState initialState, ClusterService clusterService, @Nullable TimeValue timeout, Logger logger,
                                     ThreadContext contextHolder) {
             this.clusterService = clusterService;
    -        this.lastObservedState = new AtomicReference<>(initialState);
    +        this.lastObservedState = new AtomicReference<>(new StoredState(initialState));
             this.timeOutValue = timeout;
             if (timeOutValue != null) {
                 this.startTimeNS = System.nanoTime();
    @@ -82,11 +84,14 @@ public class ClusterStateObserver {
             this.contextHolder = contextHolder;
         }
     
    -    /** last cluster state and status observed by this observer. Note that this may not be the current one */
    -    public ClusterState observedState() {
    -        ClusterState state = lastObservedState.get();
    -        assert state != null;
    -        return state;
    +    /** sets the last observed state to the currently applied cluster state and returns it */
    +    public ClusterState setAndGetObservedState() {
    +        if (observingContext.get() != null) {
    +            throw new ElasticsearchException("cannot set current cluster state while waiting for a cluster state change");
    +        }
    +        ClusterState clusterState = clusterService.state();
    +        lastObservedState.set(new StoredState(clusterState));
    +        return clusterState;
         }
     
         /** indicates whether this observer has timedout */
    @@ -114,7 +119,7 @@ public class ClusterStateObserver {
          * @param timeOutValue    a timeout for waiting. If null the global observer timeout will be used.
          */
         public void waitForNextChange(Listener listener, Predicate<ClusterState> statePredicate, @Nullable TimeValue timeOutValue) {
    -
    +        listener = new ContextPreservingListener(listener, contextHolder.newRestorableContext(false));
             if (observingContext.get() != null) {
                 throw new ElasticsearchException("already waiting for a cluster state change");
             }
    @@ -130,7 +135,7 @@ public class ClusterStateObserver {
                         logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
                         // update to latest, in case people want to retry
                         timedOut = true;
    -                    lastObservedState.set(clusterService.state());
    +                    lastObservedState.set(new StoredState(clusterService.state()));
                         listener.onTimeout(timeOutValue);
                         return;
                     }
    @@ -146,16 +151,14 @@ public class ClusterStateObserver {
     
             // sample a new state
             ClusterState newState = clusterService.state();
    -        ClusterState lastState = lastObservedState.get();
    -        if (newState != lastState && statePredicate.test(newState)) {
    +        if (lastObservedState.get().sameState(newState) == false && statePredicate.test(newState)) {
                 // good enough, let's go.
                 logger.trace("observer: sampled state accepted by predicate ({})", newState);
    -            lastObservedState.set(newState);
    +            lastObservedState.set(new StoredState(newState));
                 listener.onNewClusterState(newState);
             } else {
                 logger.trace("observer: sampled state rejected by predicate ({}). adding listener to ClusterService", newState);
    -            ObservingContext context =
    -                new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), statePredicate);
    +            final ObservingContext context = new ObservingContext(listener, statePredicate);
                 if (!observingContext.compareAndSet(null, context)) {
                     throw new ElasticsearchException("already waiting for a cluster state change");
                 }
    @@ -177,7 +180,7 @@ public class ClusterStateObserver {
                     if (observingContext.compareAndSet(context, null)) {
                         clusterService.removeTimeoutListener(this);
                         logger.trace("observer: accepting cluster state change ({})", state);
    -                    lastObservedState.set(state);
    +                    lastObservedState.set(new StoredState(state));
                         context.listener.onNewClusterState(state);
                     } else {
                         logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version());
    @@ -195,13 +198,12 @@ public class ClusterStateObserver {
                     return;
                 }
                 ClusterState newState = clusterService.state();
    -            ClusterState lastState = lastObservedState.get();
    -            if (newState != lastState && context.statePredicate.test(newState)) {
    +            if (lastObservedState.get().sameState(newState) == false && context.statePredicate.test(newState)) {
                     // double check we're still listening
                     if (observingContext.compareAndSet(context, null)) {
                         logger.trace("observer: post adding listener: accepting current cluster state ({})", newState);
                         clusterService.removeTimeoutListener(this);
    -                    lastObservedState.set(newState);
    +                    lastObservedState.set(new StoredState(newState));
                         context.listener.onNewClusterState(newState);
                     } else {
                         logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState);
    @@ -230,13 +232,30 @@ public class ClusterStateObserver {
                     long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
                     logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
                     // update to latest, in case people want to retry
    -                lastObservedState.set(clusterService.state());
    +                lastObservedState.set(new StoredState(clusterService.state()));
                     timedOut = true;
                     context.listener.onTimeout(timeOutValue);
                 }
             }
         }
     
    +    /**
    +     * The observer considers two cluster states to be the same if they have the same version and the same master node id (i.e. both null or both equal)
    +     */
    +    private static class StoredState {
    +        private final String masterNodeId;
    +        private final long version;
    +
    +        StoredState(ClusterState clusterState) {
    +            this.masterNodeId = clusterState.nodes().getMasterNodeId();
    +            this.version = clusterState.version();
    +        }
    +
    +        public boolean sameState(ClusterState clusterState) {
    +            return version == clusterState.version() && Objects.equals(masterNodeId, clusterState.nodes().getMasterNodeId());
    +        }
    +    }
    +
         public interface Listener {
     
             /** called when a new state is observed */
    @@ -252,7 +271,7 @@ public class ClusterStateObserver {
             public final Listener listener;
             public final Predicate<ClusterState> statePredicate;
     
    -        public ObservingContext(Listener listener, Predicate statePredicate) {
    +        ObservingContext(Listener listener, Predicate statePredicate) {
                 this.listener = listener;
                 this.statePredicate = statePredicate;
             }
    @@ -260,30 +279,33 @@ public class ClusterStateObserver {
     
         private static final class ContextPreservingListener implements Listener {
             private final Listener delegate;
    -        private final ThreadContext.StoredContext tempContext;
    +        private final Supplier<ThreadContext.StoredContext> contextSupplier;
     
     
    -        private ContextPreservingListener(Listener delegate, ThreadContext.StoredContext storedContext) {
    -            this.tempContext = storedContext;
    +        private ContextPreservingListener(Listener delegate, Supplier<ThreadContext.StoredContext> contextSupplier) {
    +            this.contextSupplier = contextSupplier;
                 this.delegate = delegate;
             }
     
             @Override
             public void onNewClusterState(ClusterState state) {
    -            tempContext.restore();
    -            delegate.onNewClusterState(state);
    +            try (ThreadContext.StoredContext context = contextSupplier.get()) {
    +                delegate.onNewClusterState(state);
    +            }
             }
     
             @Override
             public void onClusterServiceClose() {
    -            tempContext.restore();
    -            delegate.onClusterServiceClose();
    +            try (ThreadContext.StoredContext context = contextSupplier.get()) {
    +                delegate.onClusterServiceClose();
    +            }
             }
     
             @Override
             public void onTimeout(TimeValue timeout) {
    -            tempContext.restore();
    -            delegate.onTimeout(timeout);
    +            try (ThreadContext.StoredContext context = contextSupplier.get()) {
    +                delegate.onTimeout(timeout);
    +            }
             }
         }
     }
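
Reviewer note: a usage sketch of the reworked observer. setAndGetObservedState() replaces the old observedState() getter, and sameness is now decided by version and master node id instead of reference equality; `clusterService`, `logger` and `threadContext` are assumed in scope.

---------------------------------------------------------------------------
ClusterStateObserver observer = new ClusterStateObserver(clusterService.state(), clusterService,
        TimeValue.timeValueSeconds(30), logger, threadContext);
ClusterState current = observer.setAndGetObservedState(); // resample before deciding to wait
observer.waitForNextChange(new ClusterStateObserver.Listener() {
    @Override
    public void onNewClusterState(ClusterState state) { /* retry against the new state */ }

    @Override
    public void onClusterServiceClose() { /* node is shutting down */ }

    @Override
    public void onTimeout(TimeValue timeout) { /* give up */ }
}, newState -> newState.nodes().getMasterNodeId() != null); // wait until a master is elected
---------------------------------------------------------------------------
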
    diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
    index e5493eaa955..3693447cfb6 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
    @@ -18,6 +18,8 @@
      */
     package org.elasticsearch.cluster;
     
    +import org.elasticsearch.common.Nullable;
    +
     import java.util.IdentityHashMap;
     import java.util.List;
     import java.util.Map;
    @@ -27,10 +29,10 @@ public interface ClusterStateTaskExecutor<T> {
          * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
          * should be changed.
          */
    -    BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
    +    ClusterTasksResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
     
         /**
    -     * indicates whether this task should only run if current node is master
    +     * indicates whether this executor should only run if the current node is master
          */
         default boolean runOnlyOnMaster() {
             return true;
    @@ -68,18 +70,22 @@ public interface ClusterStateTaskExecutor<T> {
          * Represents the result of a batched execution of cluster state update tasks
          * @param <T> the type of the cluster state update task
          */
    -    class BatchResult<T> {
    +    class ClusterTasksResult<T> {
    +        public final boolean noMaster;
    +        @Nullable
             public final ClusterState resultingState;
             public final Map<T, TaskResult> executionResults;
     
             /**
              * Construct an execution result instance with a correspondence between the tasks and their execution result
    +         * @param noMaster whether this node steps down as master or has lost connection to the master
              * @param resultingState the resulting cluster state
              * @param executionResults the correspondence between tasks and their outcome
              */
    -        BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
    +        ClusterTasksResult(boolean noMaster, ClusterState resultingState, Map<T, TaskResult> executionResults) {
                 this.resultingState = resultingState;
                 this.executionResults = executionResults;
    +            this.noMaster = noMaster;
             }
     
             public static <T> Builder<T> builder() {
    @@ -117,8 +123,13 @@ public interface ClusterStateTaskExecutor<T> {
                     return this;
                 }
     
    -            public BatchResult<T> build(ClusterState resultingState) {
    -                return new BatchResult<>(resultingState, executionResults);
    +            public ClusterTasksResult<T> build(ClusterState resultingState) {
    +                return new ClusterTasksResult<>(false, resultingState, executionResults);
    +            }
    +
    +            ClusterTasksResult<T> build(ClusterTasksResult<T> result, ClusterState previousState) {
    +                return new ClusterTasksResult<>(result.noMaster, result.resultingState == null ? previousState : result.resultingState,
    +                    executionResults);
                 }
             }
         }
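
Reviewer note: a sketch of a batched executor against the renamed result type; `ExampleTask` is hypothetical.

---------------------------------------------------------------------------
class ExampleExecutor implements ClusterStateTaskExecutor<ExampleTask> {
    @Override
    public ClusterTasksResult<ExampleTask> execute(ClusterState currentState, List<ExampleTask> tasks) throws Exception {
        ClusterState newState = currentState; // compute a new state from the whole batch here
        return ClusterTasksResult.<ExampleTask>builder().successes(tasks).build(newState);
    }
}
---------------------------------------------------------------------------
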
    diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
    index a679d098616..b298e7e915d 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
    @@ -28,7 +28,7 @@ import java.util.List;
     /**
      * A task that can update the cluster state.
      */
    -public abstract  class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
    +public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
     
         private final Priority priority;
     
    @@ -41,9 +41,9 @@ public abstract  class ClusterStateUpdateTask implements ClusterStateTaskConfig,
         }
     
         @Override
    -    public final BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
    +    public final ClusterTasksResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
             ClusterState result = execute(currentState);
    -        return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
    +        return ClusterTasksResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
         }
     
         @Override
    @@ -75,4 +75,13 @@ public abstract  class ClusterStateUpdateTask implements ClusterStateTaskConfig,
         public Priority priority() {
             return priority;
         }
    +
    +    /**
    +     * Marked as final as cluster state update tasks should only run on master.
    +     * For local requests, use {@link LocalClusterUpdateTask} instead.
    +     */
    +    @Override
    +    public final boolean runOnlyOnMaster() {
    +        return true;
    +    }
     }
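
Reviewer note: master-only updates keep using this class unchanged; a minimal sketch, with `clusterService` and `logger` assumed in scope.

---------------------------------------------------------------------------
clusterService.submitStateUpdateTask("example-update", new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // return the same instance when nothing changed
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.warn("unexpected failure during [" + source + "]", e);
    }
});
---------------------------------------------------------------------------
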
    diff --git a/core/src/main/java/org/elasticsearch/cluster/Diff.java b/core/src/main/java/org/elasticsearch/cluster/Diff.java
    index 76535a4b763..4e980e36868 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/Diff.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/Diff.java
    @@ -19,22 +19,15 @@
     
     package org.elasticsearch.cluster;
     
    -import org.elasticsearch.common.io.stream.StreamOutput;
    -
    -import java.io.IOException;
    +import org.elasticsearch.common.io.stream.Writeable;
     
     /**
      * Represents difference between states of cluster state parts
      */
    -public interface Diff<T> {
    +public interface Diff<T> extends Writeable {
     
         /**
          * Applies difference to the specified part and returns the resulted part
          */
         T apply(T part);
    -
    -    /**
    -     * Writes the differences into the output stream
    -     */
    -    void writeTo(StreamOutput out) throws IOException;
     }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/Diffable.java b/core/src/main/java/org/elasticsearch/cluster/Diffable.java
    index b039f5e9b8b..57d5ea9ed1f 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/Diffable.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/Diffable.java
    @@ -34,13 +34,4 @@ public interface Diffable<T> extends Writeable {
          */
         Diff<T> diff(T previousState);
     
    -    /**
    -     * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput
    -     */
    -    Diff<T> readDiffFrom(StreamInput in) throws IOException;
    -
    -    /**
    -     * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
    -     */
    -    T readFrom(StreamInput in) throws IOException;
     }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
    index 1a3557890dd..ba769c34d0e 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
    @@ -23,10 +23,12 @@ import com.carrotsearch.hppc.cursors.IntCursor;
     import com.carrotsearch.hppc.cursors.IntObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
    +import org.elasticsearch.Version;
     import org.elasticsearch.common.collect.ImmutableOpenIntMap;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
    +import org.elasticsearch.common.io.stream.Writeable.Reader;
     
     import java.io.IOException;
     import java.util.ArrayList;
    @@ -74,7 +76,7 @@ public final class DiffableUtils {
         /**
          * Calculates diff between two ImmutableOpenMaps of non-diffable objects
          */
    -    public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
    +    public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
             assert after != null && before != null;
             return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
         }
    @@ -90,7 +92,7 @@ public final class DiffableUtils {
         /**
          * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
          */
    -    public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
    +    public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
             assert after != null && before != null;
             return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
         }
    @@ -106,7 +108,7 @@ public final class DiffableUtils {
         /**
          * Calculates diff between two Maps of non-diffable objects
          */
    -    public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
    +    public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
             assert after != null && before != null;
             return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
         }
    @@ -135,22 +137,22 @@ public final class DiffableUtils {
         /**
          * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
          */
    -    public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
    -        return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
    +    public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
    +        return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
         }
     
         /**
          * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
          */
    -    public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
    -        return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
    +    public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
    +        return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
         }
     
         /**
          * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
          */
    -    public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
    -        return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
    +    public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
    +        return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
         }
     
         /**
    @@ -164,7 +166,7 @@ public final class DiffableUtils {
                 super(in, keySerializer, valueSerializer);
             }
     
    -        public JdkMapDiff(Map<K, T> before, Map<K, T> after,
    +        JdkMapDiff(Map<K, T> before, Map<K, T> after,
                           KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
                 super(keySerializer, valueSerializer);
                 assert after != null && before != null;
    @@ -214,12 +216,17 @@ public final class DiffableUtils {
          *
          * @param  the object type
          */
    -    private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
    +    public static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
     
             protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
                 super(in, keySerializer, valueSerializer);
             }
     
    +        private ImmutableOpenMapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer,
    +                                     List<K> deletes, Map<K, Diff<T>> diffs, Map<K, T> upserts) {
    +            super(keySerializer, valueSerializer, deletes, diffs, upserts);
    +        }
    +
             public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
                                         KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
                 super(keySerializer, valueSerializer);
    @@ -245,6 +252,21 @@ public final class DiffableUtils {
                 }
             }
     
    +        /**
    +         * Returns a new diff map with the given key removed; it does not modify the invoking instance.
    +         * If the key does not exist in the diff map, the same instance is returned.
    +         */
    +        public ImmutableOpenMapDiff<K, T> withKeyRemoved(K key) {
    +            if (this.diffs.containsKey(key) == false && this.upserts.containsKey(key) == false) {
    +                return this;
    +            }
    +            Map<K, Diff<T>> newDiffs = new HashMap<>(this.diffs);
    +            newDiffs.remove(key);
    +            Map<K, T> newUpserts = new HashMap<>(this.upserts);
    +            newUpserts.remove(key);
    +            return new ImmutableOpenMapDiff<>(this.keySerializer, this.valueSerializer, this.deletes, newDiffs, newUpserts);
    +        }
    +
             @Override
             public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
                 ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
    @@ -276,7 +298,7 @@ public final class DiffableUtils {
                 super(in, keySerializer, valueSerializer);
             }
     
    -        public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
    +        ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
                                            KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
                 super(keySerializer, valueSerializer);
                 assert after != null && before != null;
    @@ -346,6 +368,15 @@ public final class DiffableUtils {
                 upserts = new HashMap<>();
             }
     
    +        protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer,
    +                          List<K> deletes, Map<K, Diff<T>> diffs, Map<K, T> upserts) {
    +            this.keySerializer = keySerializer;
    +            this.valueSerializer = valueSerializer;
    +            this.deletes = deletes;
    +            this.diffs = diffs;
    +            this.upserts = upserts;
    +        }
    +
             protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
                 this.keySerializer = keySerializer;
                 this.valueSerializer = valueSerializer;
    @@ -406,12 +437,29 @@ public final class DiffableUtils {
                 for (K delete : deletes) {
                     keySerializer.writeKey(delete, out);
                 }
    -            out.writeVInt(diffs.size());
    -            for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
    -                keySerializer.writeKey(entry.getKey(), out);
    -                valueSerializer.writeDiff(entry.getValue(), out);
    +            Version version = out.getVersion();
    +            // filter out custom states not supported by the other node
    +            int diffCount = 0;
    +            for (Diff<T> diff : diffs.values()) {
    +                if (valueSerializer.supportsVersion(diff, version)) {
    +                    diffCount++;
    +                }
                 }
    -            out.writeVInt(upserts.size());
    +            out.writeVInt(diffCount);
    +            for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
    +                if (valueSerializer.supportsVersion(entry.getValue(), version)) {
    +                    keySerializer.writeKey(entry.getKey(), out);
    +                    valueSerializer.writeDiff(entry.getValue(), out);
    +                }
    +            }
    +            // filter out custom states not supported by the other node
    +            int upsertsCount = 0;
    +            for (T upsert : upserts.values()) {
    +                if (valueSerializer.supportsVersion(upsert, version)) {
    +                    upsertsCount++;
    +                }
    +            }
    +            out.writeVInt(upsertsCount);
                 for (Map.Entry<K, T> entry : upserts.entrySet()) {
                     keySerializer.writeKey(entry.getKey(), out);
                     valueSerializer.write(entry.getValue(), out);
    @@ -511,6 +559,20 @@ public final class DiffableUtils {
              */
             boolean supportsDiffableValues();
     
    +        /**
    +         * Whether this serializer supports the version of the output stream
    +         */
    +        default boolean supportsVersion(Diff<V> value, Version version) {
    +            return true;
    +        }
    +
    +        /**
    +         * Whether this serializer supports the version of the output stream
    +         */
    +        default boolean supportsVersion(V value, Version version) {
    +            return true;
    +        }
    +
             /**
              * Computes diff if this serializer supports diffable values
              */
    @@ -600,25 +662,27 @@ public final class DiffableUtils {
         }
     
         /**
    -     * Implementation of the ValueSerializer that uses a prototype object for reading operations
    +     * Implementation of the ValueSerializer that wraps value and diff readers.
          *
          * Note: this implementation is ignoring the key.
          */
    -    public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
    -        private final V proto;
    +    public static class DiffableValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
    +        private final Reader<V> reader;
    +        private final Reader<Diff<V>> diffReader;
     
    -        public DiffablePrototypeValueReader(V proto) {
    -            this.proto = proto;
    +        public DiffableValueReader(Reader<V> reader, Reader<Diff<V>> diffReader) {
    +            this.reader = reader;
    +            this.diffReader = diffReader;
             }
     
             @Override
             public V read(StreamInput in, K key) throws IOException {
    -            return proto.readFrom(in);
    +            return reader.read(in);
             }
     
             @Override
             public Diff<V> readDiff(StreamInput in, K key) throws IOException {
    -            return proto.readDiffFrom(in);
    +            return diffReader.read(in);
             }
         }
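
Reviewer note: a sketch of reading a map diff with the new reader-based API. `Part` is a hypothetical Diffable<Part> with a StreamInput constructor and a static readDiffFrom helper; `in` and `previousMap` are assumed in scope.

---------------------------------------------------------------------------
DiffableUtils.MapDiff<String, Part, ImmutableOpenMap<String, Part>> diff = DiffableUtils.readImmutableOpenMapDiff(
        in, DiffableUtils.getStringKeySerializer(), Part::new, Part::readDiffFrom);
ImmutableOpenMap<String, Part> updated = diff.apply(previousMap);
---------------------------------------------------------------------------
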
     
    diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
    index b8ac2a5eb50..a9392d3c017 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
    @@ -377,14 +377,13 @@ public class InternalClusterInfoService extends AbstractComponent
             MetaData meta = state.getMetaData();
             for (ShardStats s : stats) {
                 IndexMetaData indexMeta = meta.index(s.getShardRouting().index());
    -            Settings indexSettings = indexMeta == null ? null : indexMeta.getSettings();
                 newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath());
                 long size = s.getStats().getStore().sizeInBytes();
                 String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting());
                 if (logger.isTraceEnabled()) {
                     logger.trace("shard: {} size: {}", sid, size);
                 }
    -            if (indexSettings != null && IndexMetaData.isIndexUsingShadowReplicas(indexSettings)) {
    +            if (indexMeta != null && indexMeta.isIndexUsingShadowReplicas()) {
                     // Shards on a shared filesystem should be considered of size 0
                     if (logger.isTraceEnabled()) {
                         logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java
    new file mode 100644
    index 00000000000..9692ff8d4e1
    --- /dev/null
    +++ b/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java
    @@ -0,0 +1,93 @@
    +/*
    + * Licensed to Elasticsearch under one or more contributor
    + * license agreements. See the NOTICE file distributed with
    + * this work for additional information regarding copyright
    + * ownership. Elasticsearch licenses this file to you under
    + * the Apache License, Version 2.0 (the "License"); you may
    + * not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +package org.elasticsearch.cluster;
    +
    +import org.elasticsearch.common.Nullable;
    +import org.elasticsearch.common.Priority;
    +import org.elasticsearch.common.unit.TimeValue;
    +
    +import java.util.List;
    +
    +/**
    + * Used to apply state updates on nodes that are not necessarily master
    + */
    +public abstract class LocalClusterUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<LocalClusterUpdateTask>,
    +    ClusterStateTaskListener {
    +
    +    private final Priority priority;
    +
    +    public LocalClusterUpdateTask() {
    +        this(Priority.NORMAL);
    +    }
    +
    +    public LocalClusterUpdateTask(Priority priority) {
    +        this.priority = priority;
    +    }
    +
+    public abstract ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) throws Exception;
    +
    +    @Override
+    public final ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState,
+                                                                    List<LocalClusterUpdateTask> tasks) throws Exception {
+        assert tasks.size() == 1 && tasks.get(0) == this : "expected one-element task list containing current object but was " + tasks;
+        ClusterTasksResult<LocalClusterUpdateTask> result = execute(currentState);
+        return ClusterTasksResult.<LocalClusterUpdateTask>builder().successes(tasks).build(result, currentState);
    +    }
    +
    +    /**
+     * the node stepped down as master or lost its connection to the master
    +     */
+    public static ClusterTasksResult<LocalClusterUpdateTask> noMaster() {
+        return new ClusterTasksResult<>(true, null, null);
    +    }
    +
    +    /**
+     * no changes were made to the cluster state; useful for executing a runnable on the cluster state applier thread
    +     */
+    public static ClusterTasksResult<LocalClusterUpdateTask> unchanged() {
+        return new ClusterTasksResult<>(false, null, null);
    +    }
    +
    +    /**
    +     * locally apply cluster state received from a master
    +     */
+    public static ClusterTasksResult<LocalClusterUpdateTask> newState(ClusterState clusterState) {
+        return new ClusterTasksResult<>(false, clusterState, null);
    +    }
    +
    +    @Override
+    public String describeTasks(List<LocalClusterUpdateTask> tasks) {
+        return ""; // the batch always contains exactly this one task, and its source string already describes it
    +    }
    +
    +    @Nullable
    +    public TimeValue timeout() {
    +        return null;
    +    }
    +
    +    @Override
    +    public Priority priority() {
    +        return priority;
    +    }
    +
    +    @Override
    +    public final boolean runOnlyOnMaster() {
    +        return false;
    +    }
    +}
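
Note: the three static factories above cover every outcome a non-master updater can produce. A minimal usage sketch follows; it assumes `clusterService`, `receivedState` and `logger` are in scope and uses the `ClusterService#submitStateUpdateTask` overload for tasks that are their own config, executor and listener, as this class is. It is an illustration, not part of this commit.

-------------------------------------------------
// Hedged sketch: apply a state received from the master on the local
// applier thread via a LocalClusterUpdateTask.
clusterService.submitStateUpdateTask("apply-remote-state", new LocalClusterUpdateTask() {
    @Override
    public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
        return newState(receivedState); // or unchanged() / noMaster()
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.warn("failed to apply cluster state locally", e);
    }
});
-------------------------------------------------
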
    diff --git a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcherSupplier.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
    similarity index 63%
    rename from core/src/main/java/org/elasticsearch/common/ParseFieldMatcherSupplier.java
    rename to core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
    index 672890c2b97..9da3167ae88 100644
    --- a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcherSupplier.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
    @@ -17,20 +17,20 @@
      * under the License.
      */
     
    -package org.elasticsearch.common;
    +package org.elasticsearch.cluster;
     
    -import org.elasticsearch.index.query.QueryParseContext;
    -import org.elasticsearch.index.query.QueryShardContext;
    +import org.elasticsearch.Version;
    +import org.elasticsearch.common.io.stream.NamedWriteable;
     
     /**
    - * This interface should be implemented by classes like {@link QueryParseContext} or {@link QueryShardContext} that
    - * are able to carry a {@link ParseFieldMatcher}.
+ * Diff that also supports the NamedWriteable interface
      */
    -@FunctionalInterface
    -public interface ParseFieldMatcherSupplier {
    -
+public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteable {
         /**
    -     * @return the parseFieldMatcher
    +     * The minimal version of the recipient this custom object can be sent to
          */
    -    ParseFieldMatcher getParseFieldMatcher();
    +    default Version getMinimalSupportedVersion() {
    +        return Version.CURRENT.minimumCompatibilityVersion();
    +    }
    +
     }
    diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestFragmentParser.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
    similarity index 64%
    rename from test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestFragmentParser.java
    rename to core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
    index 390ac1ce366..07974422096 100644
    --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestFragmentParser.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
    @@ -16,18 +16,20 @@
      * specific language governing permissions and limitations
      * under the License.
      */
    -package org.elasticsearch.test.rest.yaml.parser;
     
    -import java.io.IOException;
    +package org.elasticsearch.cluster;
    +
    +import org.elasticsearch.Version;
    +import org.elasticsearch.common.io.stream.NamedWriteable;
     
     /**
    - * Base parser for a REST test suite fragment
    - * @param  the test fragment's type that gets parsed and returned
+ * Diffable that also supports the NamedWriteable interface
      */
    -public interface ClientYamlTestFragmentParser {
    -
+public interface NamedDiffable<T> extends Diffable<T>, NamedWriteable {
         /**
    -     * Parses a test fragment given the current {@link ClientYamlTestSuiteParseContext}
    +     * The minimal version of the recipient this custom object can be sent to
          */
    -    T parse(ClientYamlTestSuiteParseContext parseContext) throws IOException, ClientYamlTestParseException;
    +    default Version getMinimalSupportedVersion() {
    +        return Version.CURRENT.minimumCompatibilityVersion();
    +    }
     }
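
Note: RestoreInProgress, SnapshotsInProgress and SnapshotDeletionsInProgress later in this diff all implement this interface pair the same way. A skeleton distilled from those files, with a hypothetical name and the writeTo/toXContent plumbing elided:

-------------------------------------------------
// MyCustom and "my_custom" are illustrative stand-ins; the recipe itself
// (stream constructor, getWriteableName, static readDiffFrom) is taken
// from the customs converted later in this diff.
public class MyCustom extends AbstractNamedDiffable<Custom> implements Custom {
    public static final String TYPE = "my_custom";

    public MyCustom(StreamInput in) throws IOException {
        // read fields from the stream
    }

    @Override
    public String getWriteableName() {
        return TYPE; // the key under which this custom is registered
    }

    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
        return readDiffFrom(Custom.class, TYPE, in);
    }
}
-------------------------------------------------
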
    diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java
    new file mode 100644
    index 00000000000..c6434db9e87
    --- /dev/null
    +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java
    @@ -0,0 +1,58 @@
    +/*
    + * Licensed to Elasticsearch under one or more contributor
    + * license agreements. See the NOTICE file distributed with
    + * this work for additional information regarding copyright
    + * ownership. Elasticsearch licenses this file to you under
    + * the Apache License, Version 2.0 (the "License"); you may
    + * not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +
    +package org.elasticsearch.cluster;
    +
    +import org.elasticsearch.Version;
    +import org.elasticsearch.common.io.stream.StreamInput;
    +
    +import java.io.IOException;
    +
    +/**
    + * Value Serializer for named diffables
    + */
+public class NamedDiffableValueSerializer<T extends NamedDiffable<T>> extends DiffableUtils.DiffableValueSerializer<String, T> {
+
+    private final Class<T> tClass;
+
+    public NamedDiffableValueSerializer(Class<T> tClass) {
    +        this.tClass = tClass;
    +    }
    +
    +    @Override
    +    public T read(StreamInput in, String key) throws IOException {
    +        return in.readNamedWriteable(tClass, key);
    +    }
    +
    +    @Override
+    public boolean supportsVersion(Diff<T> value, Version version) {
+        return version.onOrAfter(((NamedDiff<?>) value).getMinimalSupportedVersion());
    +    }
    +
    +    @Override
    +    public boolean supportsVersion(T value, Version version) {
    +        return version.onOrAfter(value.getMinimalSupportedVersion());
    +    }
    +
    +    @SuppressWarnings("unchecked")
    +    @Override
+    public Diff<T> readDiff(StreamInput in, String key) throws IOException {
    +        return in.readNamedWriteable(NamedDiff.class, key);
    +    }
    +}
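
Note: the serializer defers to the stream's named-writeable registry, keyed by getWriteableName(), instead of dispatching through prototype instances. Stripped of the Elasticsearch types, the registry idea reduces to a name-to-reader map; a self-contained sketch with illustrative names:

-------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Toy name-to-reader registry: deserialization looks up a reader function
// by name, so no prototype instance of each concrete type is needed.
final class ReaderRegistry<T> {
    private final Map<String, Function<String, ? extends T>> readers = new HashMap<>();

    void register(String name, Function<String, ? extends T> reader) {
        readers.put(name, reader);
    }

    T read(String name, String payload) {
        Function<String, ? extends T> reader = readers.get(name);
        if (reader == null) {
            throw new IllegalArgumentException("unknown named writeable [" + name + "]");
        }
        return reader.apply(payload);
    }
}
-------------------------------------------------
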
    diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
    index 94333c10dde..bda1481130c 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
    @@ -36,7 +36,9 @@ import org.elasticsearch.discovery.zen.NodesFaultDetection;
     import org.elasticsearch.threadpool.ThreadPool;
     import org.elasticsearch.transport.TransportService;
     
    +import java.util.HashSet;
     import java.util.List;
    +import java.util.Set;
     import java.util.concurrent.ConcurrentMap;
     import java.util.concurrent.ScheduledFuture;
     
    @@ -76,20 +78,26 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
             this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
         }
     
-    public void connectToNodes(List<DiscoveryNode> addedNodes) {
+    public void connectToNodes(Iterable<DiscoveryNode> discoveryNodes) {
     
             // TODO: do this in parallel (and wait)
    -        for (final DiscoveryNode node : addedNodes) {
    +        for (final DiscoveryNode node : discoveryNodes) {
                 try (Releasable ignored = nodeLocks.acquire(node)) {
    -                Integer current = nodes.put(node, 0);
    -                assert current == null : "node " + node + " was added in event but already in internal nodes";
    +                nodes.putIfAbsent(node, 0);
                     validateNodeConnected(node);
                 }
             }
         }
     
-    public void disconnectFromNodes(List<DiscoveryNode> removedNodes) {
    -        for (final DiscoveryNode node : removedNodes) {
    +    /**
+     * Disconnects from all nodes except the ones provided as a parameter
    +     */
+    public void disconnectFromNodesExcept(Iterable<DiscoveryNode> nodesToKeep) {
+        Set<DiscoveryNode> currentNodes = new HashSet<>(nodes.keySet());
    +        for (DiscoveryNode node : nodesToKeep) {
    +            currentNodes.remove(node);
    +        }
    +        for (final DiscoveryNode node : currentNodes) {
                 try (Releasable ignored = nodeLocks.acquire(node)) {
                     Integer current = nodes.remove(node);
                     assert current != null : "node " + node + " was removed in event but not in internal nodes";
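
Note: replacing put-plus-assert with putIfAbsent makes connectToNodes idempotent, and disconnectFromNodesExcept inverts the old contract: callers now say which nodes to keep. The body is a plain set difference; a self-contained equivalent:

-------------------------------------------------
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DisconnectExceptDemo {
    public static void main(String[] args) {
        Set<String> connected = new HashSet<>(Arrays.asList("node-1", "node-2", "node-3"));
        List<String> nodesToKeep = Arrays.asList("node-1", "node-3");

        // Everything currently connected that is not in the keep-set gets dropped.
        Set<String> toDisconnect = new HashSet<>(connected);
        nodesToKeep.forEach(toDisconnect::remove);

        System.out.println(toDisconnect); // prints [node-2]
    }
}
-------------------------------------------------
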
    diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
    index 55a09f87f75..55e70dbe644 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
    @@ -39,12 +39,10 @@ import java.util.Objects;
     /**
      * Meta data about restore processes that are currently executing
      */
-public class RestoreInProgress extends AbstractDiffable<Custom> implements Custom {
+public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements Custom {
     
         public static final String TYPE = "restore";
     
    -    public static final RestoreInProgress PROTO = new RestoreInProgress();
    -
     private final List<Entry> entries;
     
         /**
@@ -377,15 +375,15 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custom {
          * {@inheritDoc}
          */
         @Override
    -    public String type() {
    +    public String getWriteableName() {
             return TYPE;
         }
     
    -    /**
    -     * {@inheritDoc}
    -     */
    -    @Override
    -    public RestoreInProgress readFrom(StreamInput in) throws IOException {
+    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(Custom.class, TYPE, in);
    +    }
    +
    +    public RestoreInProgress(StreamInput in) throws IOException {
             Entry[] entries = new Entry[in.readVInt()];
             for (int i = 0; i < entries.length; i++) {
                 Snapshot snapshot = new Snapshot(in);
@@ -404,7 +402,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custom {
                 }
                 entries[i] = new Entry(snapshot, state, Collections.unmodifiableList(indexBuilder), builder.build());
             }
    -        return new RestoreInProgress(entries);
    +        this.entries = Arrays.asList(entries);
         }
     
         /**
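
Note: this is the first of many conversions in this diff from the prototype style (a shared PROTO whose readFrom returns a new instance) to a StreamInput-accepting constructor, which lets the fields stay final. The shape of the change, reduced to stand-in java.io types:

-------------------------------------------------
import java.io.DataInput;
import java.io.IOException;

// Illustration only: constructor-based deserialization needs no shared
// PROTO instance, and the fields can be final.
final class WirePoint {
    final int x;
    final int y;

    WirePoint(DataInput in) throws IOException {
        this.x = in.readInt();
        this.y = in.readInt();
    }
}
-------------------------------------------------
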
    diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
    new file mode 100644
    index 00000000000..b3ab12fe21a
    --- /dev/null
    +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
    @@ -0,0 +1,220 @@
    +/*
    + * Licensed to Elasticsearch under one or more contributor
    + * license agreements. See the NOTICE file distributed with
    + * this work for additional information regarding copyright
    + * ownership. Elasticsearch licenses this file to you under
    + * the Apache License, Version 2.0 (the "License"); you may
    + * not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +
    +package org.elasticsearch.cluster;
    +
    +import org.elasticsearch.Version;
    +import org.elasticsearch.cluster.ClusterState.Custom;
    +import org.elasticsearch.common.io.stream.StreamInput;
    +import org.elasticsearch.common.io.stream.StreamOutput;
    +import org.elasticsearch.common.io.stream.Writeable;
    +import org.elasticsearch.common.xcontent.XContentBuilder;
    +import org.elasticsearch.snapshots.Snapshot;
    +
    +import java.io.IOException;
    +import java.util.ArrayList;
    +import java.util.Collections;
    +import java.util.List;
    +import java.util.Objects;
    +
    +/**
    + * A class that represents the snapshot deletions that are in progress in the cluster.
    + */
+public class SnapshotDeletionsInProgress extends AbstractNamedDiffable<Custom> implements Custom {
    +
    +    public static final String TYPE = "snapshot_deletions";
    +    // the version where SnapshotDeletionsInProgress was introduced
    +    public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED;
    +
    +    // the list of snapshot deletion request entries
+    private final List<Entry> entries;
+
+    private SnapshotDeletionsInProgress(List<Entry> entries) {
    +        this.entries = Collections.unmodifiableList(entries);
    +    }
    +
    +    public SnapshotDeletionsInProgress(StreamInput in) throws IOException {
    +        this.entries = Collections.unmodifiableList(in.readList(Entry::new));
    +    }
    +
    +    /**
    +     * Returns a new instance of {@link SnapshotDeletionsInProgress} with the given
    +     * {@link Entry} added.
    +     */
    +    public static SnapshotDeletionsInProgress newInstance(Entry entry) {
    +        return new SnapshotDeletionsInProgress(Collections.singletonList(entry));
    +    }
    +
    +    /**
    +     * Returns a new instance of {@link SnapshotDeletionsInProgress} which adds
    +     * the given {@link Entry} to the invoking instance.
    +     */
    +    public SnapshotDeletionsInProgress withAddedEntry(Entry entry) {
+        List<Entry> entries = new ArrayList<>(getEntries());
    +        entries.add(entry);
    +        return new SnapshotDeletionsInProgress(entries);
    +    }
    +
    +    /**
    +     * Returns a new instance of {@link SnapshotDeletionsInProgress} which removes
    +     * the given entry from the invoking instance.
    +     */
    +    public SnapshotDeletionsInProgress withRemovedEntry(Entry entry) {
+        List<Entry> entries = new ArrayList<>(getEntries());
    +        entries.remove(entry);
    +        return new SnapshotDeletionsInProgress(entries);
    +    }
    +
    +    /**
    +     * Returns an unmodifiable list of snapshot deletion entries.
    +     */
+    public List<Entry> getEntries() {
    +        return entries;
    +    }
    +
    +    /**
    +     * Returns {@code true} if there are snapshot deletions in progress in the cluster,
    +     * returns {@code false} otherwise.
    +     */
    +    public boolean hasDeletionsInProgress() {
    +        return entries.isEmpty() == false;
    +    }
    +
    +    @Override
    +    public String getWriteableName() {
    +        return TYPE;
    +    }
    +
    +    @Override
    +    public boolean equals(Object o) {
    +        if (this == o) {
    +            return true;
    +        }
    +        if (o == null || getClass() != o.getClass()) {
    +            return false;
    +        }
    +
    +        SnapshotDeletionsInProgress that = (SnapshotDeletionsInProgress) o;
    +        return entries.equals(that.entries);
    +    }
    +
    +    @Override
    +    public int hashCode() {
    +        return 31 + entries.hashCode();
    +    }
    +
    +    @Override
    +    public void writeTo(StreamOutput out) throws IOException {
    +        out.writeList(entries);
    +    }
    +
+    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(Custom.class, TYPE, in);
    +    }
    +
    +    @Override
    +    public Version getMinimalSupportedVersion() {
    +        return VERSION_INTRODUCED;
    +    }
    +
    +    @Override
    +    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    +        builder.startArray(TYPE);
    +        for (Entry entry : entries) {
    +            builder.startObject();
    +            {
    +                builder.field("repository", entry.snapshot.getRepository());
    +                builder.field("snapshot", entry.snapshot.getSnapshotId().getName());
    +                builder.timeValueField("start_time_millis", "start_time", entry.startTime);
    +                builder.field("repository_state_id", entry.repositoryStateId);
    +            }
    +            builder.endObject();
    +        }
    +        builder.endArray();
    +        return builder;
    +    }
    +
    +    /**
    +     * A class representing a snapshot deletion request entry in the cluster state.
    +     */
    +    public static final class Entry implements Writeable {
    +        private final Snapshot snapshot;
    +        private final long startTime;
    +        private final long repositoryStateId;
    +
    +        public Entry(Snapshot snapshot, long startTime, long repositoryStateId) {
    +            this.snapshot = snapshot;
    +            this.startTime = startTime;
    +            this.repositoryStateId = repositoryStateId;
    +        }
    +
    +        public Entry(StreamInput in) throws IOException {
    +            this.snapshot = new Snapshot(in);
    +            this.startTime = in.readVLong();
    +            this.repositoryStateId = in.readLong();
    +        }
    +
    +        /**
    +         * The snapshot to delete.
    +         */
    +        public Snapshot getSnapshot() {
    +            return snapshot;
    +        }
    +
    +        /**
    +         * The start time in milliseconds for deleting the snapshots.
    +         */
    +        public long getStartTime() {
    +            return startTime;
    +        }
    +
    +        /**
    +         * The repository state id at the time the snapshot deletion began.
    +         */
    +        public long getRepositoryStateId() {
    +            return repositoryStateId;
    +        }
    +
    +        @Override
    +        public boolean equals(Object o) {
    +            if (this == o) {
    +                return true;
    +            }
    +            if (o == null || getClass() != o.getClass()) {
    +                return false;
    +            }
    +            Entry that = (Entry) o;
    +            return snapshot.equals(that.snapshot)
    +                       && startTime == that.startTime
    +                       && repositoryStateId == that.repositoryStateId;
    +        }
    +
    +        @Override
    +        public int hashCode() {
    +            return Objects.hash(snapshot, startTime, repositoryStateId);
    +        }
    +
    +        @Override
    +        public void writeTo(StreamOutput out) throws IOException {
    +            snapshot.writeTo(out);
    +            out.writeVLong(startTime);
    +            out.writeLong(repositoryStateId);
    +        }
    +    }
    +}
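
Note: withAddedEntry and withRemovedEntry make this custom a persistent value: every "mutation" returns a new immutable instance, which is what allows cluster states to be shared freely across threads. The idiom in isolation, with illustrative names:

-------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Self-contained copy-on-write idiom mirroring withAddedEntry/withRemovedEntry.
final class ImmutableEntries {
    private final List<String> entries;

    ImmutableEntries(List<String> entries) {
        this.entries = Collections.unmodifiableList(new ArrayList<>(entries));
    }

    ImmutableEntries withAdded(String entry) {
        List<String> copy = new ArrayList<>(entries);
        copy.add(entry);
        return new ImmutableEntries(copy);
    }

    ImmutableEntries withRemoved(String entry) {
        List<String> copy = new ArrayList<>(entries);
        copy.remove(entry);
        return new ImmutableEntries(copy);
    }

    List<String> entries() {
        return entries;
    }
}
-------------------------------------------------
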
    diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
    index 6df5f85987d..0ac1e8e4090 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
    @@ -22,6 +22,7 @@ package org.elasticsearch.cluster;
     import com.carrotsearch.hppc.ObjectContainer;
     import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
    +import org.elasticsearch.Version;
     import org.elasticsearch.cluster.ClusterState.Custom;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.io.stream.StreamInput;
    @@ -43,10 +44,14 @@ import java.util.Map;
     /**
      * Meta data about snapshots that are currently executing
      */
-public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
+public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implements Custom {
         public static final String TYPE = "snapshots";
     
    -    public static final SnapshotsInProgress PROTO = new SnapshotsInProgress();
    +    // denotes an undefined repository state id, which will happen when receiving a cluster state with
    +    // a snapshot in progress from a pre 5.2.x node
    +    public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L;
    +    // the version where repository state ids were introduced
    +    private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0_UNRELEASED;
     
         @Override
         public boolean equals(Object o) {
@@ -74,9 +79,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
         private final List<IndexId> indices;
         private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
             private final long startTime;
    +        private final long repositoryStateId;
     
         public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List<IndexId> indices,
-                     long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
+                     long startTime, long repositoryStateId, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
                 this.state = state;
                 this.snapshot = snapshot;
                 this.includeGlobalState = includeGlobalState;
@@ -90,10 +96,12 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                     this.shards = shards;
                     this.waitingIndices = findWaitingIndices(shards);
                 }
    +            this.repositoryStateId = repositoryStateId;
             }
     
         public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
    -            this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
    +            this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime,
    +                 entry.repositoryStateId, shards);
             }
     
         public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
@@ -132,6 +140,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                 return startTime;
             }
     
    +        public long getRepositoryStateId() {
    +            return repositoryStateId;
    +        }
    +
             @Override
             public boolean equals(Object o) {
                 if (this == o) return true;
@@ -147,6 +159,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                 if (!snapshot.equals(entry.snapshot)) return false;
                 if (state != entry.state) return false;
                 if (!waitingIndices.equals(entry.waitingIndices)) return false;
    +            if (repositoryStateId != entry.repositoryStateId) return false;
     
                 return true;
             }
@@ -161,6 +174,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                 result = 31 * result + indices.hashCode();
                 result = 31 * result + waitingIndices.hashCode();
                 result = 31 * result + Long.hashCode(startTime);
    +            result = 31 * result + Long.hashCode(repositoryStateId);
                 return result;
             }
     
@@ -361,12 +375,15 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
         }
     
         @Override
    -    public String type() {
    +    public String getWriteableName() {
             return TYPE;
         }
     
    -    @Override
    -    public SnapshotsInProgress readFrom(StreamInput in) throws IOException {
+    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(Custom.class, TYPE, in);
    +    }
    +
    +    public SnapshotsInProgress(StreamInput in) throws IOException {
             Entry[] entries = new Entry[in.readVInt()];
             for (int i = 0; i < entries.length; i++) {
                 Snapshot snapshot = new Snapshot(in);
@@ -387,15 +404,20 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                     State shardState = State.fromValue(in.readByte());
                     builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
                 }
    +            long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID;
    +            if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) {
    +                repositoryStateId = in.readLong();
    +            }
                 entries[i] = new Entry(snapshot,
                                        includeGlobalState,
                                        partial,
                                        state,
                                        Collections.unmodifiableList(indexBuilder),
                                        startTime,
    +                                   repositoryStateId,
                                        builder.build());
             }
    -        return new SnapshotsInProgress(entries);
    +        this.entries = Arrays.asList(entries);
         }
     
         @Override
@@ -417,6 +439,9 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
                     out.writeOptionalString(shardEntry.value.nodeId());
                     out.writeByte(shardEntry.value.state().value());
                 }
    +            if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) {
    +                out.writeLong(entry.repositoryStateId);
    +            }
             }
         }
     
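
Note: the read and write sides gate on the same version constant, so a 5.2+ node never sends the extra long to an older peer and substitutes UNDEFINED_REPOSITORY_STATE_ID when reading from one. A self-contained rendering of the pattern, with a stand-in int version id in place of the Version class:

-------------------------------------------------
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedField {
    static final int FIELD_INTRODUCED = 520; // stand-in numeric id for V_5_2_0
    static final long UNDEFINED = -2L;       // stand-in undefined default

    static void write(DataOutputStream out, int peerVersion, long fieldValue) throws IOException {
        if (peerVersion >= FIELD_INTRODUCED) {
            out.writeLong(fieldValue); // only peers that know the field receive it
        }
    }

    static long read(DataInputStream in, int peerVersion) throws IOException {
        if (peerVersion >= FIELD_INTRODUCED) {
            return in.readLong();
        }
        return UNDEFINED; // older peer: the field is absent from the stream
    }
}
-------------------------------------------------
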
@@ -430,6 +455,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
         private static final String INDICES = "indices";
         private static final String START_TIME_MILLIS = "start_time_millis";
         private static final String START_TIME = "start_time";
    +    private static final String REPOSITORY_STATE_ID = "repository_state_id";
         private static final String SHARDS = "shards";
         private static final String INDEX = "index";
         private static final String SHARD = "shard";
@@ -461,6 +487,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
             }
             builder.endArray();
             builder.timeValueField(START_TIME_MILLIS, START_TIME, entry.startTime());
    +        builder.field(REPOSITORY_STATE_ID, entry.getRepositoryStateId());
             builder.startArray(SHARDS);
             {
                 for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : entry.shards) {
    diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
    index 0cf124612d0..8973890021f 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
    @@ -25,10 +25,10 @@ import org.apache.logging.log4j.util.Supplier;
     import org.elasticsearch.ElasticsearchException;
     import org.elasticsearch.ExceptionsHelper;
     import org.elasticsearch.cluster.ClusterChangedEvent;
    +import org.elasticsearch.cluster.ClusterStateTaskExecutor;
     import org.elasticsearch.cluster.ClusterState;
     import org.elasticsearch.cluster.ClusterStateObserver;
     import org.elasticsearch.cluster.ClusterStateTaskConfig;
    -import org.elasticsearch.cluster.ClusterStateTaskExecutor;
     import org.elasticsearch.cluster.ClusterStateTaskListener;
     import org.elasticsearch.cluster.MasterNodeChangePredicate;
     import org.elasticsearch.cluster.NotMasterException;
    @@ -69,6 +69,7 @@ import java.util.HashSet;
     import java.util.List;
     import java.util.Locale;
     import java.util.Set;
    +import java.util.function.Predicate;
     
     public class ShardStateAction extends AbstractComponent {
     
    @@ -91,11 +92,13 @@ public class ShardStateAction extends AbstractComponent {
             transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
         }
     
    -    private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) {
    -        DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode();
    +    private void sendShardAction(final String actionName, final ClusterState currentState, final ShardEntry shardEntry, final Listener listener) {
    +        ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
    +        DiscoveryNode masterNode = currentState.nodes().getMasterNode();
+        Predicate<ClusterState> changePredicate = MasterNodeChangePredicate.build(currentState);
             if (masterNode == null) {
                 logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry);
    -            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
    +            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener, changePredicate);
             } else {
                 logger.debug("{} sending [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode.getId(), shardEntry);
                 transportService.sendRequest(masterNode,
    @@ -108,7 +111,7 @@ public class ShardStateAction extends AbstractComponent {
                         @Override
                         public void handleException(TransportException exp) {
                             if (isMasterChannelException(exp)) {
    -                            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
    +                            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener, changePredicate);
                             } else {
                                 logger.warn((Supplier) () -> new ParameterizedMessage("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode, shardEntry), exp);
                                 listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
    @@ -162,20 +165,19 @@ public class ShardStateAction extends AbstractComponent {
     
         private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message,
                                  @Nullable final Exception failure, Listener listener, ClusterState currentState) {
    -        ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
             ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure);
    -        sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
    +        sendShardAction(SHARD_FAILED_ACTION_NAME, currentState, shardEntry, listener);
         }
     
         // visible for testing
    -    protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardEntry shardEntry, Listener listener) {
+    protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardEntry shardEntry, Listener listener, Predicate<ClusterState> changePredicate) {
             observer.waitForNextChange(new ClusterStateObserver.Listener() {
                 @Override
                 public void onNewClusterState(ClusterState state) {
                     if (logger.isTraceEnabled()) {
                         logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state, shardEntry);
                     }
    -                sendShardAction(actionName, observer, shardEntry, listener);
    +                sendShardAction(actionName, state, shardEntry, listener);
                 }
     
                 @Override
    @@ -189,7 +191,7 @@ public class ShardStateAction extends AbstractComponent {
                     // we wait indefinitely for a new master
                     assert false;
                 }
    -        }, MasterNodeChangePredicate.build(observer.observedState()));
    +        }, changePredicate);
         }
     
     private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
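
Note: the essence of the fix above is that the master-change predicate is built from the same ClusterState the caller based its decision on, not from whatever the observer sees later, so a master change in the window between deciding and retrying cannot be missed. The rule in a generic, self-contained form:

-------------------------------------------------
import java.util.function.Predicate;

// Capture the baseline from the state you acted on; a predicate built later
// could compare against a newer state and silently swallow a change.
final class BaselinePredicate {
    static Predicate<Long> masterChanged(long observedMasterVersion) {
        return newVersion -> newVersion != observedMasterVersion;
    }
}
-------------------------------------------------
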
    @@ -197,7 +199,7 @@ public class ShardStateAction extends AbstractComponent {
             private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
             private final Logger logger;
     
    -        public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
    +        ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) {
                 this.clusterService = clusterService;
                 this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
                 this.logger = logger;
    @@ -258,8 +260,8 @@ public class ShardStateAction extends AbstractComponent {
             }
     
             @Override
-        public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardEntry> batchResultBuilder = BatchResult.builder();
+        public ClusterTasksResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
+            ClusterTasksResult.Builder<ShardEntry> batchResultBuilder = ClusterTasksResult.builder();
             List<ShardEntry> tasksToBeApplied = new ArrayList<>();
                 List failedShardsToBeApplied = new ArrayList<>();
                 List staleShardsToBeApplied = new ArrayList<>();
    @@ -354,9 +356,8 @@ public class ShardStateAction extends AbstractComponent {
             shardStarted(shardRouting, message, listener, clusterService.state());
         }
         public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener, ClusterState currentState) {
    -        ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
             ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null);
    -        sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener);
    +        sendShardAction(SHARD_STARTED_ACTION_NAME, currentState, shardEntry, listener);
         }
     
     private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
    @@ -364,7 +365,7 @@ public class ShardStateAction extends AbstractComponent {
             private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
             private final Logger logger;
     
    -        public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
    +        ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) {
                 this.clusterService = clusterService;
                 this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
                 this.logger = logger;
    @@ -393,8 +394,8 @@ public class ShardStateAction extends AbstractComponent {
             }
     
             @Override
-        public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardEntry> builder = BatchResult.builder();
+        public ClusterTasksResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
+            ClusterTasksResult.Builder<ShardEntry> builder = ClusterTasksResult.builder();
             List<ShardEntry> tasksToBeApplied = new ArrayList<>();
             List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates
    diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
    index 12e6ee0f7ec..2bdf560580b 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
    @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.block;
     
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.cluster.metadata.IndexMetaData;
     import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
    @@ -48,8 +49,6 @@ import static java.util.stream.Stream.concat;
 public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
         public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), ImmutableOpenMap.of());
     
    -    public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK;
    -
     private final Set<ClusterBlock> global;
 
     private final ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks;
@@ -59,23 +58,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
     ClusterBlocks(Set<ClusterBlock> global, ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
             this.global = global;
             this.indicesBlocks = indicesBlocks;
    -
    -        levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
    -        for (final ClusterBlockLevel level : ClusterBlockLevel.values()) {
-            Predicate<ClusterBlock> containsLevel = block -> block.contains(level);
-            Set<ClusterBlock> newGlobal = unmodifiableSet(global.stream()
    -                    .filter(containsLevel)
    -                    .collect(toSet()));
    -
-            ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder();
-            for (ObjectObjectCursor<String, Set<ClusterBlock>> entry : indicesBlocks) {
    -                indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream()
    -                        .filter(containsLevel)
    -                        .collect(toSet())));
    -            }
    -
    -            levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
    -        }
    +        levelHolders = generateLevelHolders(global, indicesBlocks);
         }
     
         public Set global() {
@@ -98,6 +81,27 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
             return indices(level).getOrDefault(index, emptySet());
         }
     
+    private static ImmutableLevelHolder[] generateLevelHolders(Set<ClusterBlock> global,
+                                                               ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
    +        ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
    +        for (final ClusterBlockLevel level : ClusterBlockLevel.values()) {
+            Predicate<ClusterBlock> containsLevel = block -> block.contains(level);
+            Set<ClusterBlock> newGlobal = unmodifiableSet(global.stream()
    +                .filter(containsLevel)
    +                .collect(toSet()));
    +
+            ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder();
+            for (ObjectObjectCursor<String, Set<ClusterBlock>> entry : indicesBlocks) {
    +                indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream()
    +                    .filter(containsLevel)
    +                    .collect(toSet())));
    +            }
    +
    +            levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
    +        }
    +        return levelHolders;
    +    }
    +
         /**
      * Returns true if one of the global blocks has its disable state persistence flag set.
          */
@@ -239,15 +243,16 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
             }
         }
     
    -    @Override
    -    public ClusterBlocks readFrom(StreamInput in) throws IOException {
    +    public ClusterBlocks(StreamInput in) throws IOException {
         Set<ClusterBlock> global = readBlockSet(in);
         int size = in.readVInt();
         ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder(size);
             for (int j = 0; j < size; j++) {
                 indicesBuilder.put(in.readString().intern(), readBlockSet(in));
             }
    -        return new ClusterBlocks(global, indicesBuilder.build());
    +        this.global = global;
    +        this.indicesBlocks = indicesBuilder.build();
    +        levelHolders = generateLevelHolders(global, indicesBlocks);
         }
     
     private static Set<ClusterBlock> readBlockSet(StreamInput in) throws IOException {
@@ -259,6 +264,10 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
             return unmodifiableSet(blocks);
         }
     
+    public static Diff<ClusterBlocks> readDiffFrom(StreamInput in) throws IOException {
    +        return AbstractDiffable.readDiffFrom(ClusterBlocks::new, in);
    +    }
    +
         static class ImmutableLevelHolder {
     
             static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(emptySet(), ImmutableOpenMap.of());
@@ -383,9 +392,5 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
                 }
                 return new ClusterBlocks(unmodifiableSet(new HashSet<>(global)), indicesBuilder.build());
             }
    -
    -        public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException {
    -            return PROTO.readFrom(in);
    -        }
         }
     }
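
Note: factoring out generateLevelHolders lets both constructors (the builder path and the new stream path) establish the same invariant: per-level block views are filtered once at construction so later checks are lookups, not repeated stream filters. The precomputation pattern in a self-contained form, with illustrative names:

-------------------------------------------------
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class LevelViews {
    enum Level { READ, WRITE, METADATA_READ, METADATA_WRITE }

    // Build one filtered view per level, once, so per-level checks become
    // plain map lookups instead of re-filtering the full block set.
    static Map<Level, Set<String>> precompute(Map<String, EnumSet<Level>> blocksByName) {
        Map<Level, Set<String>> views = new EnumMap<>(Level.class);
        for (Level level : Level.values()) {
            views.put(level, blocksByName.entrySet().stream()
                .filter(e -> e.getValue().contains(level))
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet()));
        }
        return views;
    }
}
-------------------------------------------------
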
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
    index e62a3935ad5..8071871fbfe 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java
    @@ -21,13 +21,16 @@ package org.elasticsearch.cluster.metadata;
     
     import org.elasticsearch.ElasticsearchGenerationException;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.common.Strings;
    +import org.elasticsearch.common.bytes.BytesArray;
     import org.elasticsearch.common.compress.CompressedXContent;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.xcontent.ToXContent;
     import org.elasticsearch.common.xcontent.XContentBuilder;
     import org.elasticsearch.common.xcontent.XContentFactory;
    +import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.common.xcontent.XContentParser;
     
     import java.io.IOException;
    @@ -39,8 +42,6 @@ import static java.util.Collections.emptySet;
     
 public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
     
    -    public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null);
    -
         private final String alias;
     
         private final CompressedXContent filter;
@@ -171,22 +172,29 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
     
         }
     
    -    @Override
    -    public AliasMetaData readFrom(StreamInput in) throws IOException {
    -        String alias = in.readString();
    -        CompressedXContent filter = null;
    +    public AliasMetaData(StreamInput in) throws IOException {
    +        alias = in.readString();
             if (in.readBoolean()) {
                 filter = CompressedXContent.readCompressedString(in);
    +        } else {
    +            filter = null;
             }
    -        String indexRouting = null;
             if (in.readBoolean()) {
                 indexRouting = in.readString();
    +        } else {
    +            indexRouting = null;
             }
    -        String searchRouting = null;
             if (in.readBoolean()) {
                 searchRouting = in.readString();
    +            searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
    +        } else {
    +            searchRouting = null;
    +            searchRoutingValues = emptySet();
             }
    -        return new AliasMetaData(alias, filter, indexRouting, searchRouting);
    +    }
    +
+    public static Diff<AliasMetaData> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(AliasMetaData::new, in);
         }
     
         public static class Builder {
@@ -225,14 +233,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
                     this.filter = null;
                     return this;
                 }
    -            try {
    -                try (XContentParser parser = XContentFactory.xContent(filter).createParser(filter)) {
    -                    filter(parser.mapOrdered());
    -                }
    -                return this;
    -            } catch (IOException e) {
    -                throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
    -            }
    +            return filter(XContentHelper.convertToMap(XContentFactory.xContent(filter), filter, true));
             }
     
         public Builder filter(Map<String, Object> filter) {
@@ -286,11 +287,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
                     if (binary) {
                         builder.field("filter", aliasMetaData.filter.compressed());
                     } else {
    -                    byte[] data = aliasMetaData.filter().uncompressed();
    -                    try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
-                        Map<String, Object> filter = parser.mapOrdered();
    -                        builder.field("filter", filter);
    -                    }
    +                    builder.field("filter", XContentHelper.convertToMap(new BytesArray(aliasMetaData.filter().uncompressed()), true).v2());
                     }
                 }
                 if (aliasMetaData.indexRouting() != null) {
@@ -336,14 +333,6 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
                 }
                 return builder.build();
             }
    -
    -        public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException {
    -            aliasMetaData.writeTo(out);
    -        }
    -
    -        public static AliasMetaData readFrom(StreamInput in) throws IOException {
    -            return PROTO.readFrom(in);
    -        }
         }
     
     }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
    index 4ad9b7e5317..786bd9af78a 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java
    @@ -121,7 +121,7 @@ public interface AliasOrIndex {
                             }
     
                             @Override
    -                        public final void remove() {
    +                        public void remove() {
                                 throw new UnsupportedOperationException();
                             }
     
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
    index 29faa0f7956..bce6e45c793 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java
    @@ -25,7 +25,9 @@ import org.elasticsearch.common.Strings;
     import org.elasticsearch.common.component.AbstractComponent;
     import org.elasticsearch.common.inject.Inject;
     import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.common.xcontent.XContentFactory;
    +import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.common.xcontent.XContentParser;
     import org.elasticsearch.index.query.QueryBuilder;
     import org.elasticsearch.index.query.QueryParseContext;
    @@ -74,8 +76,8 @@ public class AliasValidator extends AbstractComponent {
         public void validateAliasStandalone(Alias alias) {
             validateAliasStandalone(alias.name(), alias.indexRouting());
             if (Strings.hasLength(alias.filter())) {
    -            try (XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter())) {
    -                parser.map();
    +            try {
    +                XContentHelper.convertToMap(XContentFactory.xContent(alias.filter()), alias.filter(), false);
                 } catch (Exception e) {
                     throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e);
                 }
    @@ -113,9 +115,10 @@ public class AliasValidator extends AbstractComponent {
          * provided {@link org.elasticsearch.index.query.QueryShardContext}
          * @throws IllegalArgumentException if the filter is not valid
          */
    -    public void validateAliasFilter(String alias, String filter, QueryShardContext queryShardContext) {
    +    public void validateAliasFilter(String alias, String filter, QueryShardContext queryShardContext,
    +            NamedXContentRegistry xContentRegistry) {
             assert queryShardContext != null;
    -        try (XContentParser parser = XContentFactory.xContent(filter).createParser(filter)) {
    +        try (XContentParser parser = XContentFactory.xContent(filter).createParser(xContentRegistry, filter)) {
                 validateAliasFilter(parser, queryShardContext);
             } catch (Exception e) {
                 throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e);
    @@ -127,9 +130,10 @@ public class AliasValidator extends AbstractComponent {
          * provided {@link org.elasticsearch.index.query.QueryShardContext}
          * @throws IllegalArgumentException if the filter is not valid
          */
    -    public void validateAliasFilter(String alias, byte[] filter, QueryShardContext queryShardContext) {
    +    public void validateAliasFilter(String alias, byte[] filter, QueryShardContext queryShardContext,
    +            NamedXContentRegistry xContentRegistry) {
             assert queryShardContext != null;
    -        try (XContentParser parser = XContentFactory.xContent(filter).createParser(filter)) {
    +        try (XContentParser parser = XContentFactory.xContent(filter).createParser(xContentRegistry, filter)) {
                 validateAliasFilter(parser, queryShardContext);
             } catch (Exception e) {
                 throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java
    index 4b4a8e54d7c..fa30abe5a73 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java
    @@ -33,7 +33,7 @@ final class AutoExpandReplicas {
     public static final Setting<AutoExpandReplicas> SETTING = new Setting<>(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "false", (value) -> {
             final int min;
             final int max;
    -        if (Booleans.parseBoolean(value, true) == false) {
    +        if (Booleans.isFalse(value)) {
                 return new AutoExpandReplicas(0, 0, false);
             }
             final int dash = value.indexOf('-');
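
Note: Booleans.isFalse narrows the disabled case to the literal "false"; every other value must parse as a min-max range, with "all" accepted as the upper bound. A self-contained cut of that parse, using an int pair and Integer.MAX_VALUE where the real setting uses its own types:

-------------------------------------------------
public class AutoExpandRangeParse {
    // Illustrative parse of auto_expand_replicas-style values: "false",
    // "0-1", "0-all". Returns null when auto-expansion is disabled.
    static int[] parse(String value) {
        if ("false".equals(value)) {
            return null;
        }
        int dash = value.indexOf('-');
        if (dash < 0) {
            throw new IllegalArgumentException("expected [min-max] but got [" + value + "]");
        }
        int min = Integer.parseInt(value.substring(0, dash));
        String upper = value.substring(dash + 1);
        int max = "all".equals(upper) ? Integer.MAX_VALUE : Integer.parseInt(upper);
        return new int[] {min, max};
    }
}
-------------------------------------------------
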
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
    index 1a2cfe90ad6..d60617ea642 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
    @@ -20,9 +20,8 @@
     package org.elasticsearch.cluster.metadata;
     
     import org.elasticsearch.cluster.Diff;
    +import org.elasticsearch.cluster.NamedDiff;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcher;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.io.stream.Writeable;
    @@ -67,10 +66,9 @@ public final class IndexGraveyard implements MetaData.Custom {
                                                                                          500, // the default maximum number of tombstones
                                                                                          Setting.Property.NodeScope);
     
    -    public static final IndexGraveyard PROTO = new IndexGraveyard(new ArrayList<>());
         public static final String TYPE = "index-graveyard";
         private static final ParseField TOMBSTONES_FIELD = new ParseField("tombstones");
-    private static final ObjectParser<List<Tombstone>, ParseFieldMatcherSupplier> GRAVEYARD_PARSER;
+    private static final ObjectParser<List<Tombstone>, Void> GRAVEYARD_PARSER;
         static {
             GRAVEYARD_PARSER = new ObjectParser<>("index_graveyard", ArrayList::new);
             GRAVEYARD_PARSER.declareObjectArray(List::addAll, Tombstone.getParser(), TOMBSTONES_FIELD);
    @@ -83,7 +81,7 @@ public final class IndexGraveyard implements MetaData.Custom {
             tombstones = Collections.unmodifiableList(list);
         }
     
    -    private IndexGraveyard(final StreamInput in) throws IOException {
    +    public IndexGraveyard(final StreamInput in) throws IOException {
             final int queueSize = in.readVInt();
             List<Tombstone> tombstones = new ArrayList<>(queueSize);
             for (int i = 0; i < queueSize; i++) {
    @@ -92,12 +90,8 @@ public final class IndexGraveyard implements MetaData.Custom {
             this.tombstones = Collections.unmodifiableList(tombstones);
         }
     
    -    public static IndexGraveyard fromStream(final StreamInput in) throws IOException {
    -        return new IndexGraveyard(in);
    -    }
    -
         @Override
    -    public String type() {
    +    public String getWriteableName() {
             return TYPE;
         }
     
    @@ -144,8 +138,8 @@ public final class IndexGraveyard implements MetaData.Custom {
             return builder.endArray();
         }
     
    -    public IndexGraveyard fromXContent(final XContentParser parser) throws IOException {
    -        return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT));
    +    public static IndexGraveyard fromXContent(final XContentParser parser) throws IOException {
    +        return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, null));
         }
     
         @Override
    @@ -161,19 +155,13 @@ public final class IndexGraveyard implements MetaData.Custom {
             }
         }
     
    -    @Override
    -    public IndexGraveyard readFrom(final StreamInput in) throws IOException {
    -        return new IndexGraveyard(in);
    -    }
    -
         @Override
         @SuppressWarnings("unchecked")
         public Diff<MetaData.Custom> diff(final MetaData.Custom previous) {
             return new IndexGraveyardDiff((IndexGraveyard) previous, this);
         }
     
    -    @Override
    -    public Diff readDiffFrom(final StreamInput in) throws IOException {
    +    public static NamedDiff<MetaData.Custom> readDiffFrom(final StreamInput in) throws IOException {
             return new IndexGraveyardDiff(in);
         }
     
    @@ -273,7 +261,7 @@ public final class IndexGraveyard implements MetaData.Custom {
         /**
          * A class representing a diff of two IndexGraveyard objects.
          */
    -    public static final class IndexGraveyardDiff implements Diff<MetaData.Custom> {
    +    public static final class IndexGraveyardDiff implements NamedDiff<MetaData.Custom> {
     
             private final List<Tombstone> added;
             private final int removedCount;
    @@ -349,6 +337,11 @@ public final class IndexGraveyard implements MetaData.Custom {
             public int getRemovedCount() {
                 return removedCount;
             }
    +
    +        @Override
    +        public String getWriteableName() {
    +            return TYPE;
    +        }
         }
     
         /**
    @@ -359,16 +352,17 @@ public final class IndexGraveyard implements MetaData.Custom {
             private static final String INDEX_KEY = "index";
             private static final String DELETE_DATE_IN_MILLIS_KEY = "delete_date_in_millis";
             private static final String DELETE_DATE_KEY = "delete_date";
    -        private static final ObjectParser<Tombstone.Builder, ParseFieldMatcherSupplier> TOMBSTONE_PARSER;
    +        private static final ObjectParser<Tombstone.Builder, Void> TOMBSTONE_PARSER;
             static {
                 TOMBSTONE_PARSER = new ObjectParser<>("tombstoneEntry", Tombstone.Builder::new);
    -            TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, Index::parseIndex, new ParseField(INDEX_KEY));
    +            TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, (parser, context) -> Index.fromXContent(parser),
    +                    new ParseField(INDEX_KEY));
                 TOMBSTONE_PARSER.declareLong(Tombstone.Builder::deleteDateInMillis, new ParseField(DELETE_DATE_IN_MILLIS_KEY));
                 TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY));
             }
     
    -        static ContextParser<ParseFieldMatcherSupplier, Tombstone> getParser() {
    -            return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build();
    +        static ContextParser<Void, Tombstone> getParser() {
    +            return (parser, context) -> TOMBSTONE_PARSER.apply(parser, null).build();
             }
     
             private final Index index;
    @@ -443,7 +437,7 @@ public final class IndexGraveyard implements MetaData.Custom {
             }
     
             public static Tombstone fromXContent(final XContentParser parser) throws IOException {
    -            return TOMBSTONE_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build();
    +            return TOMBSTONE_PARSER.parse(parser, null).build();
             }
     
             /**
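
    The IndexGraveyard changes follow the pattern used throughout this commit: the
    PROTO instance with its readFrom/readDiffFrom instance methods gives way to a
    writeable name (getWriteableName) plus static readers that are registered and
    looked up by that name. A toy model of the dispatch, with invented names:

    ---------------------------------------------------------------------------
    import java.io.DataInput;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class NamedReaderRegistrySketch {
        interface Reader<T> {
            T read(DataInput in) throws IOException;
        }

        private final Map<String, Reader<?>> readers = new HashMap<>();

        void register(String name, Reader<?> reader) {
            readers.put(name, reader);
        }

        // deserialization reads the name first, then dispatches to the registered
        // static reader; no prototype instance is needed anymore
        Object read(DataInput in) throws IOException {
            String name = in.readUTF();
            Reader<?> reader = readers.get(name);
            if (reader == null) {
                throw new IllegalArgumentException("unknown named writeable [" + name + "]");
            }
            return reader.read(in);
        }
    }
    ---------------------------------------------------------------------------
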
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
    index a1f217e1377..68575634a1e 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
    @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.LongArrayList;
     import com.carrotsearch.hppc.cursors.IntObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
    +
     import org.elasticsearch.Version;
     import org.elasticsearch.action.support.ActiveShardCount;
     import org.elasticsearch.cluster.Diff;
    @@ -33,21 +34,23 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
     import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
     import org.elasticsearch.cluster.routing.allocation.IndexMetaDataUpdater;
     import org.elasticsearch.common.Nullable;
    -import org.elasticsearch.common.ParseFieldMatcher;
    +import org.elasticsearch.common.bytes.BytesArray;
     import org.elasticsearch.common.collect.ImmutableOpenIntMap;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.collect.MapBuilder;
     import org.elasticsearch.common.compress.CompressedXContent;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
    +import org.elasticsearch.common.logging.DeprecationLogger;
    +import org.elasticsearch.common.logging.ESLoggerFactory;
     import org.elasticsearch.common.settings.Setting;
     import org.elasticsearch.common.settings.Setting.Property;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.settings.loader.SettingsLoader;
    -import org.elasticsearch.common.xcontent.FromXContentBuilder;
     import org.elasticsearch.common.xcontent.ToXContent;
     import org.elasticsearch.common.xcontent.XContentBuilder;
     import org.elasticsearch.common.xcontent.XContentFactory;
    +import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.common.xcontent.XContentParser;
     import org.elasticsearch.common.xcontent.XContentType;
     import org.elasticsearch.gateway.MetaDataStateFormat;
    @@ -70,13 +73,18 @@ import java.util.Map;
     import java.util.Set;
     import java.util.function.Function;
     
    +import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;
     import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
     import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
     import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
     import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
     
    -public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {
    +public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
     
    +    /**
    +     * This class will be removed in v7.0
    +     */
    +    @Deprecated
         public interface Custom extends Diffable<Custom>, ToXContent {
     
             String type();
    @@ -85,6 +93,16 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     
             Custom fromXContent(XContentParser parser) throws IOException;
     
    +        /**
    +         * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput
    +         */
    +        Diff<Custom> readDiffFrom(StreamInput in) throws IOException;
    +
    +        /**
    +         * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
    +         */
    +        Custom readFrom(StreamInput in) throws IOException;
    +
             /**
              * Merges from this to another, with this being more important, i.e., if something exists in this and another,
              * this will prevail.
    @@ -121,7 +139,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
         public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));
     
    -    public static enum State {
    +    public enum State {
             OPEN((byte) 0),
             CLOSE((byte) 1);
     
    @@ -176,11 +194,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
         public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
         public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING =
    -        Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope);
    +        Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope, Property.Deprecated);
    +
    +    public static final String SETTING_ROUTING_PARTITION_SIZE = "index.routing_partition_size";
    +    public static final Setting<Integer> INDEX_ROUTING_PARTITION_SIZE_SETTING =
    +            Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope);
     
         public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
         public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING =
    -        Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope);
    +        Setting.boolSetting(SETTING_SHARED_FILESYSTEM, INDEX_SHADOW_REPLICAS_SETTING, Property.IndexScope, Property.Deprecated);
     
         public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
         public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
    @@ -221,18 +243,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope);
         public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
         public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING =
    -        Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope);
    +        Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false,
    +            Property.Dynamic, Property.IndexScope, Property.Deprecated);
         public static final String INDEX_UUID_NA_VALUE = "_na_";
     
         public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";
         public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include";
         public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude";
         public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
    -        Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
    +        Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
         public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
    -        Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
    +        Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
         public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
    -        Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
    +        Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
         public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
             Setting.groupSetting("index.routing.allocation.initial_recovery."); // this is only setable internally not a registered setting!!
     
    @@ -246,10 +269,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                           Setting.Property.Dynamic,
                           Setting.Property.IndexScope);
     
    -    public static final IndexMetaData PROTO = IndexMetaData.builder("")
    -            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
    -            .numberOfShards(1).numberOfReplicas(0).build();
    -
         public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations";
         static final String KEY_VERSION = "version";
         static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards";
    @@ -262,6 +281,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         public static final String INDEX_STATE_FILE_PREFIX = "state-";
         private final int routingNumShards;
         private final int routingFactor;
    +    private final int routingPartitionSize;
     
         private final int numberOfShards;
         private final int numberOfReplicas;
    @@ -300,7 +320,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                               ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
                               DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
                               Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion,
    -                          int routingNumShards, ActiveShardCount waitForActiveShards) {
    +                          int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards) {
     
             this.index = index;
             this.version = version;
    @@ -324,6 +344,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
             this.routingNumShards = routingNumShards;
             this.routingFactor = routingNumShards / numberOfShards;
    +        this.routingPartitionSize = routingPartitionSize;
             this.waitForActiveShards = waitForActiveShards;
             assert numberOfShards * routingFactor == routingNumShards :  routingNumShards + " must be a multiple of " + numberOfShards;
         }
    @@ -403,6 +424,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return numberOfReplicas;
         }
     
    +    public int getRoutingPartitionSize() {
    +        return routingPartitionSize;
    +    }
    +
    +    public boolean isRoutingPartitionedIndex() {
    +        return routingPartitionSize != 1;
    +    }
    +
         public int getTotalNumberOfShards() {
             return totalNumberOfShards;
         }
    @@ -564,13 +593,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return new IndexMetaDataDiff(previousState, this);
         }
     
    -    @Override
    -    public Diff<IndexMetaData> readDiffFrom(StreamInput in) throws IOException {
    +    public static Diff<IndexMetaData> readDiffFrom(StreamInput in) throws IOException {
             return new IndexMetaDataDiff(in);
         }
     
    -    @Override
    -    public IndexMetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
    +    public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
             return Builder.fromXContent(parser);
         }
     
    @@ -593,7 +620,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             private final Diff<ImmutableOpenMap<String, Custom>> customs;
             private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;
     
    -        public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
    +        IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
                 index = after.index.getName();
                 version = after.version;
                 routingNumShards = after.routingNumShards;
    @@ -607,15 +634,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                     DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
             }
     
    -        public IndexMetaDataDiff(StreamInput in) throws IOException {
    +        IndexMetaDataDiff(StreamInput in) throws IOException {
                 index = in.readString();
                 routingNumShards = in.readInt();
                 version = in.readLong();
                 state = State.fromId(in.readByte());
                 settings = Settings.readSettingsFromStream(in);
                 primaryTerms = in.readVLongArray();
    -            mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
    -            aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
    +            mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData::new,
    +                MappingMetaData::readDiffFrom);
    +            aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new,
    +                AliasMetaData::readDiffFrom);
                 customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
                     new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                         @Override
    @@ -623,6 +652,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                             return lookupPrototypeSafe(key).readFrom(in);
                         }
     
    +                    @SuppressWarnings("unchecked")
                         @Override
                         public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                             return lookupPrototypeSafe(key).readDiffFrom(in);
    @@ -662,8 +692,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             }
         }
     
    -    @Override
    -    public IndexMetaData readFrom(StreamInput in) throws IOException {
    +    public static IndexMetaData readFrom(StreamInput in) throws IOException {
             Builder builder = new Builder(in.readString());
             builder.version(in.readLong());
             builder.setRoutingNumShards(in.readInt());
    @@ -672,12 +701,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             builder.primaryTerms(in.readVLongArray());
             int mappingsSize = in.readVInt();
             for (int i = 0; i < mappingsSize; i++) {
    -            MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in);
    +            MappingMetaData mappingMd = new MappingMetaData(in);
                 builder.putMapping(mappingMd);
             }
             int aliasesSize = in.readVInt();
             for (int i = 0; i < aliasesSize; i++) {
    -            AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
    +            AliasMetaData aliasMd = new AliasMetaData(in);
                 builder.putAlias(aliasMd);
             }
             int customSize = in.readVInt();
    @@ -799,6 +828,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 return routingNumShards == null ? numberOfShards() : routingNumShards;
             }
     
    +        /**
    +         * Returns the number of shards.
    +         *
    +         * @return the provided value or -1 if it has not been set.
    +         */
             public int numberOfShards() {
                 return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
             }
    @@ -808,10 +842,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 return this;
             }
     
    +        /**
    +         * Returns the number of replicas.
    +         *
    +         * @return the provided value or -1 if it has not been set.
    +         */
             public int numberOfReplicas() {
                 return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
             }
     
    +        public Builder routingPartitionSize(int routingPartitionSize) {
    +            settings = Settings.builder().put(settings).put(SETTING_ROUTING_PARTITION_SIZE, routingPartitionSize).build();
    +            return this;
    +        }
    +
    +        /**
    +         * Returns the routing partition size.
    +         *
    +         * @return the provided value or -1 if it has not been set.
    +         */
    +        public int routingPartitionSize() {
    +            return settings.getAsInt(SETTING_ROUTING_PARTITION_SIZE, -1);
    +        }
    +
             public Builder creationDate(long creationDate) {
                 settings = Settings.builder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
                 return this;
    @@ -831,9 +884,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             }
     
             public Builder putMapping(String type, String source) throws IOException {
    -            try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
    -                putMapping(new MappingMetaData(type, parser.mapOrdered()));
    -            }
    +            putMapping(new MappingMetaData(type, XContentHelper.convertToMap(XContentFactory.xContent(source), source, true)));
                 return this;
             }
     
    @@ -877,7 +928,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             }
     
             public Builder putInSyncAllocationIds(int shardId, Set<String> allocationIds) {
    -            inSyncAllocationIds.put(shardId, new HashSet(allocationIds));
    +            inSyncAllocationIds.put(shardId, new HashSet<>(allocationIds));
                 return this;
             }
     
    @@ -956,6 +1007,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                     throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
                 }
     
    +            int routingPartitionSize = INDEX_ROUTING_PARTITION_SIZE_SETTING.get(settings);
    +            if (routingPartitionSize != 1 && routingPartitionSize >= getRoutingNumShards()) {
    +                throw new IllegalArgumentException("routing partition size [" + routingPartitionSize + "] should be a positive number"
    +                        + " less than the number of shards [" + getRoutingNumShards() + "] for [" + index + "]");
    +            }
    +
                 // fill missing slots in inSyncAllocationIds with empty set if needed and make all entries immutable
                 ImmutableOpenIntMap.Builder<Set<String>> filledInSyncAllocationIds = ImmutableOpenIntMap.builder();
                 for (int i = 0; i < numberOfShards; i++) {
    @@ -1024,7 +1081,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
                 return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
                     tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
    -                indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), waitForActiveShards);
    +                indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards);
             }
     
             public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
    @@ -1047,11 +1104,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                     if (binary) {
                         builder.value(cursor.value.source().compressed());
                     } else {
    -                    byte[] data = cursor.value.source().uncompressed();
    -                    try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
    -                        Map<String, Object> mapping = parser.mapOrdered();
    -                        builder.map(mapping);
    -                    }
    +                    builder.map(XContentHelper.convertToMap(new BytesArray(cursor.value.source().uncompressed()), true).v2());
                     }
                 }
                 builder.endArray();
    @@ -1203,12 +1256,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 }
                 return builder.build();
             }
    -
    -        public static IndexMetaData readFrom(StreamInput in) throws IOException {
    -            return PROTO.readFrom(in);
    -        }
         }
     
    +    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class));
    +
         /**
          * Returns true iff the given settings indicate that the index
          * associated with these settings allocates it's shards on a shared
    @@ -1216,8 +1267,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
          * is the returned value from
          * {@link #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}.
          */
    -    public static boolean isOnSharedFilesystem(Settings settings) {
    -        return settings.getAsBoolean(SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));
    +    public boolean isOnSharedFilesystem(Settings settings) {
    +        // don't use the setting directly, so as not to trigger verbose deprecation logging
    +        return settings.getAsBooleanLenientForPreEs6Indices(
    +            this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger);
         }
     
         /**
    @@ -1225,8 +1278,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
          * with these settings uses shadow replicas. Otherwise false. The default
          * setting for this is false.
          */
    -    public static boolean isIndexUsingShadowReplicas(Settings settings) {
    -        return settings.getAsBoolean(SETTING_SHADOW_REPLICAS, false);
    +    public boolean isIndexUsingShadowReplicas() {
    +        return isIndexUsingShadowReplicas(this.settings);
    +    }
    +
    +    public boolean isIndexUsingShadowReplicas(Settings settings) {
    +        // don't use the setting directly, not to trigger verbose deprecation logging
    +        return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger);
         }
     
         /**
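
    The new index.routing_partition_size setting above must be 1 (the default, no
    partitioning) or strictly between 1 and the shard count, as the added build()
    validation enforces. Per the Elasticsearch routing documentation, a partitioned
    index computes shard = (hash(_routing) + hash(_id) % partition_size) % num_shards,
    so one routing value spreads over a window of shards instead of a single shard.
    A self-contained sketch with a placeholder hash; Elasticsearch itself hashes
    routing values with Murmur3:

    ---------------------------------------------------------------------------
    class PartitionedRoutingSketch {
        static int shard(int routingHash, int idHash, int partitionSize, int numShards) {
            int offset = Math.floorMod(idHash, partitionSize); // slot within the partition
            return Math.floorMod(routingHash + offset, numShards);
        }

        public static void main(String[] args) {
            int numShards = 8;
            int partitionSize = 3; // must satisfy 1 < partitionSize < numShards when enabled
            int routingHash = "user-42".hashCode(); // placeholder, not the ES hash function
            for (String id : new String[] {"a", "b", "c"}) {
                System.out.println(id + " -> shard " + shard(routingHash, id.hashCode(), partitionSize, numShards));
            }
        }
    }
    ---------------------------------------------------------------------------
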
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
    index 0c7a2db68dc..168fe2ad7f2 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
    @@ -502,11 +502,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                 this(state, options, System.currentTimeMillis(), preserveAliases);
             }
     
    -        public Context(ClusterState state, IndicesOptions options, long startTime) {
    +        Context(ClusterState state, IndicesOptions options, long startTime) {
                this(state, options, startTime, false);
             }
     
    -        public Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases) {
    +        Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases) {
                 this.state = state;
                 this.options = options;
                 this.startTime = startTime;
    @@ -754,7 +754,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
             private final String defaultDateFormatterPattern;
             private final DateTimeFormatter defaultDateFormatter;
     
    -        public DateMathExpressionResolver(Settings settings) {
    +        DateMathExpressionResolver(Settings settings) {
                 String defaultTimeZoneId = settings.get("date_math_expression_resolver.default_time_zone", "UTC");
                 this.defaultTimeZone = DateTimeZone.forID(defaultTimeZoneId);
                 defaultDateFormatterPattern = settings.get("date_math_expression_resolver.default_date_format", "YYYY.MM.dd");
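
    The defaults visible above mean that date math in index name expressions
    resolves in UTC with the Joda pattern YYYY.MM.dd unless configured otherwise.
    A rough java.time rendering of what an expression like <logs-{now/d}> expands
    to; illustrative only, the resolver's actual parsing is more involved:

    ---------------------------------------------------------------------------
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    class DateMathDefaultsSketch {
        public static void main(String[] args) {
            String date = ZonedDateTime.now(ZoneOffset.UTC)
                .format(DateTimeFormatter.ofPattern("yyyy.MM.dd")); // java.time spelling of the Joda default
            System.out.println("logs-" + date); // e.g. logs-2017.01.05
        }
    }
    ---------------------------------------------------------------------------
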
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
    index feabc380c4e..5bba34904d0 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
    @@ -23,7 +23,10 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
     
     import org.elasticsearch.Version;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.common.Nullable;
    +import org.elasticsearch.common.bytes.BytesArray;
    +import org.elasticsearch.common.bytes.BytesReference;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.collect.MapBuilder;
     import org.elasticsearch.common.compress.CompressedXContent;
    @@ -37,6 +40,7 @@ import org.elasticsearch.common.util.set.Sets;
     import org.elasticsearch.common.xcontent.ToXContent;
     import org.elasticsearch.common.xcontent.XContentBuilder;
     import org.elasticsearch.common.xcontent.XContentFactory;
    +import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.common.xcontent.XContentParser;
     
     import java.io.IOException;
    @@ -49,7 +53,6 @@ import java.util.Set;
     
     public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaData> {
     
    -    public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build();
         private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class));
     
         private final String name;
    @@ -204,8 +207,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
    +    public static Diff<IndexTemplateMetaData> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(IndexTemplateMetaData::readFrom, in);
    +    }
    +
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(name);
    @@ -395,10 +401,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                     for (ObjectObjectCursor<String, CompressedXContent> cursor : indexTemplateMetaData.mappings()) {
                         byte[] mappingSource = cursor.value.uncompressed();
    -                    Map<String, Object> mapping;
    -                    try (XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource)) {;
    -                        mapping = parser.map();
    -                    }
    +                    Map<String, Object> mapping = XContentHelper.convertToMap(new BytesArray(mappingSource), false).v2();
                         if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
                             // the type name is the root value, reduce it
                             mapping = (Map<String, Object>) mapping.get(cursor.key);
    @@ -411,10 +414,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                     for (ObjectObjectCursor<String, CompressedXContent> cursor : indexTemplateMetaData.mappings()) {
                         byte[] data = cursor.value.uncompressed();
    -                    try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
    -                        Map<String, Object> mapping = parser.mapOrdered();
    -                        builder.map(mapping);
    -                    }
    +                    builder.map(XContentHelper.convertToMap(new BytesArray(data), true).v2());
                     }
                     builder.endArray();
                 }
    @@ -529,10 +529,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
    @@ ... @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
     
    -    public static final MappingMetaData PROTO = new MappingMetaData();
    -
         public static class Routing {
     
             public static final Routing EMPTY = new Routing(false);
    @@ -89,10 +87,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
     
         public MappingMetaData(CompressedXContent mapping) throws IOException {
             this.source = mapping;
    -        Map<String, Object> mappingMap;
    -        try (XContentParser parser = XContentHelper.createParser(mapping.compressedReference())) {
    -            mappingMap = parser.mapOrdered();
    -        }
    +        Map<String, Object> mappingMap = XContentHelper.convertToMap(mapping.compressedReference(), true).v2();
             if (mappingMap.size() != 1) {
                 throw new IllegalStateException("Can't derive type from mapping, no root type: " + mapping.string());
             }
    @@ -100,10 +95,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
             initMappers((Map<String, Object>) mappingMap.get(this.type));
         }
     
    -    public MappingMetaData(Map<String, Object> mapping) throws IOException {
    -        this(mapping.keySet().iterator().next(), mapping);
    -    }
    -
         public MappingMetaData(String type, Map<String, Object> mapping) throws IOException {
             this.type = type;
             XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().map(mapping);
    @@ -132,7 +123,12 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
                     String fieldName = entry.getKey();
                     Object fieldNode = entry.getValue();
                     if (fieldName.equals("required")) {
    -                    required = lenientNodeBooleanValue(fieldNode);
    +                    try {
    +                        required = nodeBooleanValue(fieldNode);
    +                    } catch (IllegalArgumentException ex) {
    +                        throw new IllegalArgumentException("Failed to create mapping for type [" + this.type() + "]. " +
    +                            "Illegal value in field [_routing.required].", ex);
    +                    }
                     }
                 }
                 this.routing = new Routing(required);
    @@ -204,7 +200,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
                 // timestamp
                 out.writeBoolean(false); // enabled
                 out.writeString(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format());
    -            out.writeOptionalString(null);
    +            out.writeOptionalString("now"); // 5.x default
                 out.writeOptionalBoolean(null);
             }
             out.writeBoolean(hasParentField());
    @@ -232,11 +228,11 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
             return result;
         }
     
    -    public MappingMetaData readFrom(StreamInput in) throws IOException {
    -        String type = in.readString();
    -        CompressedXContent source = CompressedXContent.readCompressedString(in);
    +    public MappingMetaData(StreamInput in) throws IOException {
    +        type = in.readString();
    +        source = CompressedXContent.readCompressedString(in);
             // routing
    -        Routing routing = new Routing(in.readBoolean());
    +        routing = new Routing(in.readBoolean());
             if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
                 // timestamp
                 boolean enabled = in.readBoolean();
    @@ -247,9 +243,11 @@ public class MappingMetaData extends AbstractDiffable {
                 in.readOptionalString(); // defaultTimestamp
                 in.readOptionalBoolean(); // ignoreMissing
             }
    +        hasParentField = in.readBoolean();
    +    }
     
    -        final boolean hasParentField = in.readBoolean();
    -        return new MappingMetaData(type, source, routing, hasParentField);
    +    public static Diff<MappingMetaData> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(MappingMetaData::new, in);
         }
     
     }
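
    Two points in the MappingMetaData hunks above: deserialization moves from an
    instance readFrom to a StreamInput constructor plus a static readDiffFrom, and
    _routing.required now goes through strict boolean parsing instead of the
    lenient variant. A sketch of the strict rule under the accept-only-true-or-false
    contract; the exception message is illustrative:

    ---------------------------------------------------------------------------
    class StrictBooleanSketch {
        static boolean nodeBooleanValue(Object node) {
            if (node instanceof Boolean) {
                return (Boolean) node;
            }
            String text = String.valueOf(node);
            if ("true".equals(text)) {
                return true;
            }
            if ("false".equals(text)) {
                return false;
            }
            // values like "1" or "on" used to be coerced; now they fail loudly
            throw new IllegalArgumentException("only [true] or [false] are allowed, found [" + text + "]");
        }

        public static void main(String[] args) {
            System.out.println(nodeBooleanValue("true")); // ok
            System.out.println(nodeBooleanValue("1"));    // throws IllegalArgumentException
        }
    }
    ---------------------------------------------------------------------------
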
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
    index f091159aaec..a13e5e21e5e 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
    @@ -24,40 +24,36 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
     import org.apache.logging.log4j.Logger;
     import org.apache.lucene.util.CollectionUtil;
    +import org.elasticsearch.Version;
     import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.cluster.Diffable;
     import org.elasticsearch.cluster.DiffableUtils;
    -import org.elasticsearch.cluster.InternalClusterInfoService;
    +import org.elasticsearch.cluster.NamedDiffable;
    +import org.elasticsearch.cluster.NamedDiffableValueSerializer;
     import org.elasticsearch.cluster.block.ClusterBlock;
     import org.elasticsearch.cluster.block.ClusterBlockLevel;
    -import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
    -import org.elasticsearch.cluster.service.ClusterService;
     import org.elasticsearch.common.Nullable;
    -import org.elasticsearch.common.ParseFieldMatcher;
     import org.elasticsearch.common.UUIDs;
     import org.elasticsearch.common.collect.HppcMaps;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
    +import org.elasticsearch.common.logging.Loggers;
     import org.elasticsearch.common.regex.Regex;
     import org.elasticsearch.common.settings.Setting;
     import org.elasticsearch.common.settings.Setting.Property;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.settings.loader.SettingsLoader;
    -import org.elasticsearch.common.xcontent.FromXContentBuilder;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException;
     import org.elasticsearch.common.xcontent.ToXContent;
     import org.elasticsearch.common.xcontent.XContentBuilder;
     import org.elasticsearch.common.xcontent.XContentFactory;
     import org.elasticsearch.common.xcontent.XContentParser;
     import org.elasticsearch.common.xcontent.XContentType;
    -import org.elasticsearch.discovery.DiscoverySettings;
     import org.elasticsearch.gateway.MetaDataStateFormat;
     import org.elasticsearch.index.Index;
     import org.elasticsearch.index.IndexNotFoundException;
    -import org.elasticsearch.indices.recovery.RecoverySettings;
    -import org.elasticsearch.ingest.IngestMetadata;
     import org.elasticsearch.rest.RestStatus;
    -import org.elasticsearch.script.ScriptMetaData;
     
     import java.io.IOException;
     import java.util.ArrayList;
    @@ -69,18 +65,15 @@ import java.util.HashMap;
     import java.util.Iterator;
     import java.util.List;
     import java.util.Map;
    -import java.util.Set;
     import java.util.SortedMap;
     import java.util.TreeMap;
     
    -import static java.util.Collections.unmodifiableSet;
     import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
     import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
    -import static org.elasticsearch.common.util.set.Sets.newHashSet;
     
    -public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, FromXContentBuilder<MetaData>, ToXContent {
    +public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, ToXContent {
     
    -    public static final MetaData PROTO = builder().build();
    +    private static final Logger logger = Loggers.getLogger(MetaData.class);
     
         public static final String ALL = "_all";
     
    @@ -118,48 +111,11 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
          */
         public static EnumSet<XContentContext> ALL_CONTEXTS = EnumSet.allOf(XContentContext.class);
     
    -    public interface Custom extends Diffable<Custom>, ToXContent {
    -
    -        String type();
    -
    -        Custom fromXContent(XContentParser parser) throws IOException;
    +    public interface Custom extends NamedDiffable<Custom>, ToXContent {
     
             EnumSet<XContentContext> context();
         }
     
    -    public static Map<String, Custom> customPrototypes = new HashMap<>();
    -
    -    static {
    -        // register non plugin custom metadata
    -        registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO);
    -        registerPrototype(IngestMetadata.TYPE, IngestMetadata.PROTO);
    -        registerPrototype(ScriptMetaData.TYPE, ScriptMetaData.PROTO);
    -        registerPrototype(IndexGraveyard.TYPE, IndexGraveyard.PROTO);
    -    }
    -
    -    /**
    -     * Register a custom index meta data factory. Make sure to call it from a static block.
    -     */
    -    public static void registerPrototype(String type, Custom proto) {
    -        customPrototypes.put(type, proto);
    -    }
    -
    -    @Nullable
    -    public static <T extends Custom> T lookupPrototype(String type) {
    -        //noinspection unchecked
    -        return (T) customPrototypes.get(type);
    -    }
    -
    -    public static <T extends Custom> T lookupPrototypeSafe(String type) {
    -        //noinspection unchecked
    -        T proto = (T) customPrototypes.get(type);
    -        if (proto == null) {
    -            throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins");
    -        }
    -        return proto;
    -    }
    -
    -
         public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
             Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope);
     
    @@ -175,6 +131,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
     
         public static final String GLOBAL_STATE_FILE_PREFIX = "global-";
     
    +    private static final NamedDiffableValueSerializer<Custom> CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class);
    +
         private final String clusterUUID;
         private final long version;
     
    @@ -596,14 +554,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             // Check if any persistent metadata needs to be saved
             int customCount1 = 0;
             for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) {
    -            if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) {
    +            if (cursor.value.context().contains(XContentContext.GATEWAY)) {
                     if (!cursor.value.equals(metaData2.custom(cursor.key))) return false;
                     customCount1++;
                 }
             }
             int customCount2 = 0;
    -        for (ObjectObjectCursor<String, Custom> cursor : metaData2.customs) {
    -            if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) {
    +        for (ObjectCursor<Custom> cursor : metaData2.customs.values()) {
    +            if (cursor.value.context().contains(XContentContext.GATEWAY)) {
                     customCount2++;
                 }
             }
    @@ -616,13 +574,11 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             return new MetaDataDiff(previousState, this);
         }
     
    -    @Override
    -    public Diff<MetaData> readDiffFrom(StreamInput in) throws IOException {
    +    public static Diff<MetaData> readDiffFrom(StreamInput in) throws IOException {
             return new MetaDataDiff(in);
         }
     
    -    @Override
    -    public MetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
    +    public static MetaData fromXContent(XContentParser parser) throws IOException {
             return Builder.fromXContent(parser);
         }
     
    @@ -644,35 +600,26 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             private Diff<ImmutableOpenMap<String, IndexTemplateMetaData>> templates;
             private Diff<ImmutableOpenMap<String, Custom>> customs;
     
    -        public MetaDataDiff(MetaData before, MetaData after) {
    +        MetaDataDiff(MetaData before, MetaData after) {
                 clusterUUID = after.clusterUUID;
                 version = after.version;
                 transientSettings = after.transientSettings;
                 persistentSettings = after.persistentSettings;
                 indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
                 templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
    -            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
    +            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
             }
     
    -        public MetaDataDiff(StreamInput in) throws IOException {
    +        MetaDataDiff(StreamInput in) throws IOException {
                 clusterUUID = in.readString();
                 version = in.readLong();
                 transientSettings = Settings.readSettingsFromStream(in);
                 persistentSettings = Settings.readSettingsFromStream(in);
    -            indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
    -            templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
    -            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
    -                    new DiffableUtils.DiffableValueSerializer<String, Custom>() {
    -                @Override
    -                public Custom read(StreamInput in, String key) throws IOException {
    -                    return lookupPrototypeSafe(key).readFrom(in);
    -                }
    -
    -                @Override
    -                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
    -                    return lookupPrototypeSafe(key).readDiffFrom(in);
    -                }
    -            });
    +            indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData::readFrom,
    +                IndexMetaData::readDiffFrom);
    +            templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData::readFrom,
    +                IndexTemplateMetaData::readDiffFrom);
    +            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
             }
     
             @Override
    @@ -700,8 +647,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             }
         }
     
    -    @Override
    -    public MetaData readFrom(StreamInput in) throws IOException {
    +    public static MetaData readFrom(StreamInput in) throws IOException {
             Builder builder = new Builder();
             builder.version = in.readLong();
             builder.clusterUUID = in.readString();
    @@ -709,17 +655,16 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             builder.persistentSettings(readSettingsFromStream(in));
             int size = in.readVInt();
             for (int i = 0; i < size; i++) {
    -            builder.put(IndexMetaData.Builder.readFrom(in), false);
    +            builder.put(IndexMetaData.readFrom(in), false);
             }
             size = in.readVInt();
             for (int i = 0; i < size; i++) {
    -            builder.put(IndexTemplateMetaData.Builder.readFrom(in));
    +            builder.put(IndexTemplateMetaData.readFrom(in));
             }
             int customSize = in.readVInt();
             for (int i = 0; i < customSize; i++) {
    -            String type = in.readString();
    -            Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
    -            builder.putCustom(type, customIndexMetaData);
    +            Custom customIndexMetaData = in.readNamedWriteable(Custom.class);
    +            builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData);
             }
             return builder.build();
         }
    @@ -738,10 +683,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             for (ObjectCursor<IndexTemplateMetaData> cursor : templates.values()) {
                 cursor.value.writeTo(out);
             }
    -        out.writeVInt(customs.size());
    -        for (ObjectObjectCursor<String, Custom> cursor : customs) {
    -            out.writeString(cursor.key);
    -            cursor.value.writeTo(out);
    +        // filter out custom states not supported by the other node
    +        int numberOfCustoms = 0;
    +        for (ObjectCursor<Custom> cursor : customs.values()) {
    +            if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
    +                numberOfCustoms++;
    +            }
    +        }
    +        out.writeVInt(numberOfCustoms);
    +        for (ObjectCursor<Custom> cursor : customs.values()) {
    +            if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
    +                out.writeNamedWriteable(cursor.value);
    +            }
             }
         }
     
    @@ -1020,7 +973,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                 builder.field("version", metaData.version());
                 builder.field("cluster_uuid", metaData.clusterUUID);
     
    -            if (!metaData.persistentSettings().getAsMap().isEmpty()) {
    +            if (!metaData.persistentSettings().isEmpty()) {
                     builder.startObject("settings");
                     for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) {
                         builder.field(entry.getKey(), entry.getValue());
    @@ -1028,7 +981,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                     builder.endObject();
                 }
     
    -            if (context == XContentContext.API && !metaData.transientSettings().getAsMap().isEmpty()) {
    +            if (context == XContentContext.API && !metaData.transientSettings().isEmpty()) {
                     builder.startObject("transient_settings");
                     for (Map.Entry<String, String> entry : metaData.transientSettings().getAsMap().entrySet()) {
                         builder.field(entry.getKey(), entry.getValue());
    @@ -1051,8 +1004,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                 }
     
                 for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) {
    -                Custom proto = lookupPrototypeSafe(cursor.key);
    -                if (proto.context().contains(context)) {
    +                if (cursor.value.context().contains(context)) {
                         builder.startObject(cursor.key);
                         cursor.value.toXContent(builder, params);
                         builder.endObject();
    @@ -1103,14 +1055,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                                 builder.put(IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName()));
                             }
                         } else {
    -                        // check if its a custom index metadata
    -                        Custom proto = lookupPrototype(currentFieldName);
    -                        if (proto == null) {
    -                            //TODO warn
    +                        try {
    +                            Custom custom = parser.namedObject(Custom.class, currentFieldName, null);
    +                            builder.putCustom(custom.getWriteableName(), custom);
    +                        } catch (UnknownNamedObjectException ex) {
    +                            logger.warn("Skipping unknown custom object with type {}", currentFieldName);
                                 parser.skipChildren();
    -                        } else {
    -                            Custom custom = proto.fromXContent(parser);
    -                            builder.putCustom(custom.type(), custom);
                             }
                         }
                     } else if (token.isValue()) {
    @@ -1127,10 +1077,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                 }
                 return builder.build();
             }
    -
    -        public static MetaData readFrom(StreamInput in) throws IOException {
    -            return PROTO.readFrom(in);
    -        }
         }
     
         private static final ToXContent.Params FORMAT_PARAMS;
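
    The writeTo change above counts the custom sections the destination node can
    understand before writing them, so customs newer than the receiver's version
    are skipped entirely. A condensed sketch of that count-then-write pattern with
    invented types:

    ---------------------------------------------------------------------------
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.List;

    class VersionGatedWriteSketch {
        interface Custom {
            long getMinimalSupportedVersion();
            void writeTo(DataOutput out) throws IOException;
        }

        static void writeCustoms(DataOutput out, List<Custom> customs, long receiverVersion) throws IOException {
            int supported = 0;
            for (Custom custom : customs) {
                if (receiverVersion >= custom.getMinimalSupportedVersion()) {
                    supported++;
                }
            }
            out.writeInt(supported); // the reader needs the exact count before the entries
            for (Custom custom : customs) {
                if (receiverVersion >= custom.getMinimalSupportedVersion()) {
                    custom.writeTo(out);
                }
            }
        }
    }
    ---------------------------------------------------------------------------
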
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
    index 422492e396b..0bde4a23b03 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
    @@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
     import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
     import org.elasticsearch.action.support.ActiveShardCount;
     import org.elasticsearch.action.support.ActiveShardsObserver;
    +import org.elasticsearch.action.support.ContextPreservingActionListener;
     import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
     import org.elasticsearch.cluster.ClusterState;
     import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
    @@ -59,14 +60,15 @@ import org.elasticsearch.common.io.PathUtils;
     import org.elasticsearch.common.regex.Regex;
     import org.elasticsearch.common.settings.IndexScopedSettings;
     import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.common.xcontent.XContentHelper;
     import org.elasticsearch.env.Environment;
     import org.elasticsearch.index.Index;
     import org.elasticsearch.index.IndexNotFoundException;
     import org.elasticsearch.index.IndexService;
     import org.elasticsearch.index.mapper.DocumentMapper;
    -import org.elasticsearch.index.mapper.MapperParsingException;
     import org.elasticsearch.index.mapper.MapperService;
    +import org.elasticsearch.index.mapper.MapperService.MergeReason;
     import org.elasticsearch.index.query.QueryShardContext;
     import org.elasticsearch.indices.IndexCreationException;
     import org.elasticsearch.indices.IndicesService;
    @@ -112,12 +114,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         private final Environment env;
         private final IndexScopedSettings indexScopedSettings;
         private final ActiveShardsObserver activeShardsObserver;
    +    private final NamedXContentRegistry xContentRegistry;
    +    private final ThreadPool threadPool;
     
         @Inject
         public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
                                           IndicesService indicesService, AllocationService allocationService,
                                           AliasValidator aliasValidator, Environment env,
    -                                      IndexScopedSettings indexScopedSettings, ThreadPool threadPool) {
    +                                      IndexScopedSettings indexScopedSettings, ThreadPool threadPool,
    +                                      NamedXContentRegistry xContentRegistry) {
             super(settings);
             this.clusterService = clusterService;
             this.indicesService = indicesService;
    @@ -126,6 +131,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             this.env = env;
             this.indexScopedSettings = indexScopedSettings;
             this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
    +        this.threadPool = threadPool;
    +        this.xContentRegistry = xContentRegistry;
         }
     
         /**
    @@ -215,7 +222,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             request.settings(updatedSettingsBuilder.build());
     
             clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
-                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
+                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {
                         @Override
                         protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
                             return new ClusterStateUpdateResponse(acknowledged);
    @@ -247,7 +254,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                             List<String> templateNames = new ArrayList<>();
     
                             for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
    -                                mappings.put(entry.getKey(), MapperService.parseMapping(entry.getValue()));
    +                                mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                                 }
     
                             for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
    @@ -258,10 +265,13 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                                 for (IndexTemplateMetaData template : templates) {
                                     templateNames.add(template.getName());
                                 for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
    +                                    String mappingString = cursor.value.string();
                                         if (mappings.containsKey(cursor.key)) {
    -                                        XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(cursor.value.string()));
    +                                        XContentHelper.mergeDefaults(mappings.get(cursor.key),
    +                                                MapperService.parseMapping(xContentRegistry, mappingString));
                                         } else {
    -                                        mappings.put(cursor.key, MapperService.parseMapping(cursor.value.string()));
    +                                        mappings.put(cursor.key,
    +                                            MapperService.parseMapping(xContentRegistry, mappingString));
                                         }
                                     }
                                     // handle custom
    @@ -356,10 +366,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                                 // now add the mappings
                                 MapperService mapperService = indexService.mapperService();
                                 try {
    -                                mapperService.merge(mappings, request.updateAllTypes());
    -                            } catch (MapperParsingException mpe) {
    +                                mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
    +                            } catch (Exception e) {
                                     removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
    -                                throw mpe;
    +                                throw e;
                                 }
     
                                 // the context is only used for validation so it's fine to pass fake values for the shard id and the current
    @@ -367,12 +377,13 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                                 final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
                                 for (Alias alias : request.aliases()) {
                                     if (Strings.hasLength(alias.filter())) {
    -                                    aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext);
    +                                    aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry);
                                     }
                                 }
                                 for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                                     if (aliasMetaData.filter() != null) {
    -                                    aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(), queryShardContext);
    +                                    aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(),
    +                                            queryShardContext, xContentRegistry);
                                     }
                                 }
     
    @@ -420,7 +431,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                                         .put(indexMetaData, false)
                                         .build();
     
    -                            String maybeShadowIndicator = IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) ? "s" : "";
    +                            String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
                                 logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}",
                                         request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(),
                                         indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
    @@ -465,6 +476,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     });
         }
     
+    private ContextPreservingActionListener<ClusterStateUpdateResponse> wrapPreservingContext(ActionListener<ClusterStateUpdateResponse> listener) {
    +        return new ContextPreservingActionListener<>(threadPool.getThreadContext().newRestorableContext(false), listener);
    +    }
    +
     private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws IOException {
         List<IndexTemplateMetaData> templateMetadata = new ArrayList<>();
         for (ObjectCursor<IndexTemplateMetaData> cursor : state.metaData().templates().values()) {
    @@ -538,6 +553,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 throw new IllegalArgumentException("mappings are not allowed when shrinking indices" +
                     ", all mappings are copied from the source index");
             }
    +
             if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
             // this method applies all necessary checks, i.e. if the target shards are less than the source shards
             // or if the source shards are divisible by the number of target shards
    @@ -581,9 +597,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 .put("index.allocation.max_retries", 1)
                 // now copy all similarity / analysis settings - this overrides all settings from the user unless they
                 // wanna add extra settings
    +            .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
    +            .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
                 .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate))
    +            .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize())
                 .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName())
                 .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID());
    +        if (sourceMetaData.getMinimumCompatibleVersion() != null) {
    +            indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion());
    +        }
         }
     
     }
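
`wrapPreservingContext` above captures the submitting thread's context and restores it around the listener's callbacks, which otherwise run on the cluster-state applier thread. A minimal sketch of that wrapping idea, with hypothetical stand-in types rather than the actual ThreadContext/ContextPreservingActionListener API:

---------------------------------------------------------------------------
import java.util.function.Consumer;
import java.util.function.Supplier;

// Hypothetical sketch: the Supplier is captured on the calling thread and
// returns a scope that restores that thread's context for the duration of
// the callback, whatever thread the callback runs on.
class ContextPreservingCallback<T> implements Consumer<T> {
    private final Supplier<AutoCloseable> restorableContext;
    private final Consumer<T> delegate;

    ContextPreservingCallback(Supplier<AutoCloseable> restorableContext, Consumer<T> delegate) {
        this.restorableContext = restorableContext;
        this.delegate = delegate;
    }

    @Override
    public void accept(T response) {
        try (AutoCloseable ignored = restorableContext.get()) {
            delegate.accept(response); // runs with the captured context restored
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
---------------------------------------------------------------------------
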
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
    index 7dbc06cba0f..a2212b5c3f0 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java
    @@ -97,7 +97,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
             final int previousGraveyardSize = graveyardBuilder.tombstones().size();
             for (final Index index : indices) {
                 String indexName = index.getName();
    -            logger.debug("[{}] deleting index", index);
    +            logger.info("{} deleting index", index);
                 routingTableBuilder.remove(indexName);
                 clusterBlocksBuilder.removeIndexBlocks(indexName);
                 metaDataBuilder.remove(indexName);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
    index c1de936d9c7..81fbddce46a 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
    @@ -33,6 +33,7 @@ import org.elasticsearch.common.Strings;
     import org.elasticsearch.common.component.AbstractComponent;
     import org.elasticsearch.common.inject.Inject;
     import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.index.Index;
     import org.elasticsearch.index.IndexNotFoundException;
     import org.elasticsearch.index.IndexService;
    @@ -64,18 +65,17 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
     
         private final MetaDataDeleteIndexService deleteIndexService;
     
    +    private final NamedXContentRegistry xContentRegistry;
    +
         @Inject
    -    public MetaDataIndexAliasesService(
    -        Settings settings,
    -        ClusterService clusterService,
    -        IndicesService indicesService,
    -        AliasValidator aliasValidator,
    -        MetaDataDeleteIndexService deleteIndexService) {
    +    public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService,
    +            AliasValidator aliasValidator, MetaDataDeleteIndexService deleteIndexService, NamedXContentRegistry xContentRegistry) {
             super(settings);
             this.clusterService = clusterService;
             this.indicesService = indicesService;
             this.aliasValidator = aliasValidator;
             this.deleteIndexService = deleteIndexService;
    +        this.xContentRegistry = xContentRegistry;
         }
     
         public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request,
    @@ -141,21 +141,18 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                                     // temporarily create the index and add mappings so we can parse the filter
                                     try {
                                         indexService = indicesService.createIndex(index, emptyList(), shardId -> {});
    +                                    indicesToClose.add(index.getIndex());
                                     } catch (IOException e) {
                                         throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
                                     }
-                                for (ObjectCursor<MappingMetaData> cursor : index.getMappings().values()) {
    -                                    MappingMetaData mappingMetaData = cursor.value;
    -                                    indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
    -                                        MapperService.MergeReason.MAPPING_RECOVERY, false);
    -                                }
    -                                indicesToClose.add(index.getIndex());
    +                                indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false);
                                 }
                                 indices.put(action.getIndex(), indexService);
                             }
                             // the context is only used for validation so it's fine to pass fake values for the shard id and the current
                             // timestamp
    -                        aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null, () -> 0L));
    +                        aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null, () -> 0L),
    +                                xContentRegistry);
                         }
                     };
                     changed |= action.apply(newAliasValidator, metadata, index);
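
Note the reordering in the hunk above: the temporary index is added to `indicesToClose` immediately after `createIndex(...)` succeeds, before the mapping merge that may throw, so cleanup always sees it. A small sketch of that acquire-then-register ordering; `acquire()` is a hypothetical stand-in for the index creation:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

class CleanupOrdering {
    // hypothetical stand-in for indicesService.createIndex(...)
    static AutoCloseable acquire() {
        return () -> System.out.println("temporary resource closed");
    }

    public static void main(String[] args) throws Exception {
        List<AutoCloseable> toClose = new ArrayList<>();
        try {
            AutoCloseable resource = acquire();
            toClose.add(resource);                       // register for cleanup first...
            throw new RuntimeException("merge failed");  // ...then do the fallible work
        } catch (RuntimeException expected) {
            // the resource is still released below
        } finally {
            for (AutoCloseable c : toClose) {
                c.close();
            }
        }
    }
}
---------------------------------------------------------------------------
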
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
    index 020a1d75231..e35fc7c837c 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
    @@ -19,6 +19,7 @@
     package org.elasticsearch.cluster.metadata;
     
     import com.carrotsearch.hppc.cursors.ObjectCursor;
    +
     import org.elasticsearch.Version;
     import org.elasticsearch.action.admin.indices.alias.Alias;
     import org.elasticsearch.action.support.master.MasterNodeRequest;
    @@ -35,10 +36,12 @@ import org.elasticsearch.common.regex.Regex;
     import org.elasticsearch.common.settings.IndexScopedSettings;
     import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.unit.TimeValue;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.index.Index;
     import org.elasticsearch.index.IndexService;
     import org.elasticsearch.index.mapper.MapperParsingException;
     import org.elasticsearch.index.mapper.MapperService;
    +import org.elasticsearch.index.mapper.MapperService.MergeReason;
     import org.elasticsearch.indices.IndexTemplateMissingException;
     import org.elasticsearch.indices.IndicesService;
     import org.elasticsearch.indices.InvalidIndexTemplateException;
    @@ -64,18 +67,20 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         private final IndicesService indicesService;
         private final MetaDataCreateIndexService metaDataCreateIndexService;
         private final IndexScopedSettings indexScopedSettings;
    +    private final NamedXContentRegistry xContentRegistry;
     
         @Inject
         public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService,
                                             MetaDataCreateIndexService metaDataCreateIndexService,
                                             AliasValidator aliasValidator, IndicesService indicesService,
    -                                        IndexScopedSettings indexScopedSettings) {
    +                                        IndexScopedSettings indexScopedSettings, NamedXContentRegistry xContentRegistry) {
             super(settings);
             this.clusterService = clusterService;
             this.aliasValidator = aliasValidator;
             this.indicesService = indicesService;
             this.metaDataCreateIndexService = metaDataCreateIndexService;
             this.indexScopedSettings = indexScopedSettings;
    +        this.xContentRegistry = xContentRegistry;
         }
     
         public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
    @@ -164,7 +169,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                         throw new IllegalArgumentException("index_template [" + request.name + "] already exists");
                     }
     
    -                validateAndAddTemplate(request, templateBuilder, indicesService);
    +                validateAndAddTemplate(request, templateBuilder, indicesService, xContentRegistry);
     
                     for (Alias alias : request.aliases) {
                         AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter())
    @@ -189,16 +194,20 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         }
     
         private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder,
    -            IndicesService indicesService) throws Exception {
    +            IndicesService indicesService, NamedXContentRegistry xContentRegistry) throws Exception {
             Index createdIndex = null;
             final String temporaryIndexName = UUIDs.randomBase64UUID();
             try {
    +            // use the provided values, otherwise just pick valid dummy values
    +            int dummyPartitionSize = IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.get(request.settings);
    +            int dummyShards = request.settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS,
    +                    dummyPartitionSize == 1 ? 1 : dummyPartitionSize + 1);
     
                 //create index service for parsing and validating "mappings"
                 Settings dummySettings = Settings.builder()
                     .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                     .put(request.settings)
    -                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
    +                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, dummyShards)
                     .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                     .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
                     .build();
    @@ -219,10 +228,10 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                     } catch (Exception e) {
                         throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
                     }
    -                mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(entry.getValue()));
    +                mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                 }
     
    -            dummyIndexService.mapperService().merge(mappingsForValidation, false);
    +            dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false);
     
             } finally {
                 if (createdIndex != null) {
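
The dummy settings above now derive a shard count from `index.routing_partition_size` when the template does not set one, since the validation index must be compatible with a partitioned routing configuration; `partitionSize + 1` looks to be the smallest shard count that a non-default partition size permits. A tiny illustration of that choice:

---------------------------------------------------------------------------
class DummyShards {
    // Smallest shard count valid for a given routing partition size, assuming
    // (as the hunk above suggests) the partition size must be smaller than the
    // number of shards; an unpartitioned index (size 1) can use a single shard.
    static int dummyShardCount(int partitionSize) {
        return partitionSize == 1 ? 1 : partitionSize + 1;
    }

    public static void main(String[] args) {
        System.out.println(dummyShardCount(1)); // 1
        System.out.println(dummyShardCount(4)); // 5
    }
}
---------------------------------------------------------------------------
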
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
    index 2a8b80b9e68..614d12547fc 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
    @@ -27,6 +27,7 @@ import org.elasticsearch.common.component.AbstractComponent;
     import org.elasticsearch.common.inject.Inject;
     import org.elasticsearch.common.settings.IndexScopedSettings;
     import org.elasticsearch.common.settings.Settings;
    +import org.elasticsearch.common.xcontent.NamedXContentRegistry;
     import org.elasticsearch.index.IndexSettings;
     import org.elasticsearch.index.analysis.AnalyzerScope;
     import org.elasticsearch.index.analysis.IndexAnalyzers;
    @@ -50,12 +51,15 @@ import java.util.Set;
      */
     public class MetaDataIndexUpgradeService extends AbstractComponent {
     
    +    private final NamedXContentRegistry xContentRegistry;
         private final MapperRegistry mapperRegistry;
         private final IndexScopedSettings indexScopedSettings;
     
         @Inject
    -    public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) {
    +    public MetaDataIndexUpgradeService(Settings settings, NamedXContentRegistry xContentRegistry, MapperRegistry mapperRegistry,
    +            IndexScopedSettings indexScopedSettings) {
             super(settings);
    +        this.xContentRegistry = xContentRegistry;
             this.mapperRegistry = mapperRegistry;
             this.indexScopedSettings = indexScopedSettings;
         }
    @@ -141,16 +145,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
     
                     @Override
                 public Set<Map.Entry<String, NamedAnalyzer>> entrySet() {
    -                    // just to ensure we can iterate over this single analzyer
    -                    return Collections.singletonMap(fakeDefault.name(), fakeDefault).entrySet();
    +                    return Collections.emptySet();
                     }
                 };
    -            try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) {
    -                MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, similarityService, mapperRegistry, () -> null);
-                for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
    -                    MappingMetaData mappingMetaData = cursor.value;
    -                    mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
    -                }
    +            try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
    +                MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService,
    +                        mapperRegistry, () -> null);
    +                mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
                 }
             } catch (Exception ex) {
                 // Wrap the inner exception so we have the index name in the exception message
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
    index 4e9b114ff13..c0032a4b6a4 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
    @@ -26,9 +26,9 @@ import org.apache.lucene.util.IOUtils;
     import org.elasticsearch.action.ActionListener;
     import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
     import org.elasticsearch.cluster.AckedClusterStateTaskListener;
    +import org.elasticsearch.cluster.ClusterStateTaskExecutor;
     import org.elasticsearch.cluster.ClusterState;
     import org.elasticsearch.cluster.ClusterStateTaskConfig;
    -import org.elasticsearch.cluster.ClusterStateTaskExecutor;
     import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
     import org.elasticsearch.cluster.node.DiscoveryNode;
     import org.elasticsearch.cluster.service.ClusterService;
    @@ -43,6 +43,7 @@ import org.elasticsearch.index.Index;
     import org.elasticsearch.index.IndexService;
     import org.elasticsearch.index.mapper.DocumentMapper;
     import org.elasticsearch.index.mapper.MapperService;
    +import org.elasticsearch.index.mapper.MapperService.MergeReason;
     import org.elasticsearch.indices.IndicesService;
     import org.elasticsearch.indices.InvalidTypeNameException;
     
    @@ -63,8 +64,8 @@ public class MetaDataMappingService extends AbstractComponent {
         private final ClusterService clusterService;
         private final IndicesService indicesService;
     
-    final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
-    final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
+    final RefreshTaskExecutor refreshExecutor = new RefreshTaskExecutor();
+    final PutMappingExecutor putMappingExecutor = new PutMappingExecutor();
     
     
         @Inject
    @@ -91,9 +92,9 @@ public class MetaDataMappingService extends AbstractComponent {
     
     class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
             @Override
-        public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
+        public ClusterTasksResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
             ClusterState newClusterState = executeRefresh(currentState, tasks);
-            return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
+            return ClusterTasksResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
             }
         }
     
    @@ -146,10 +147,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     // we need to create the index here, and add the current mapping to it, so we can merge
                     indexService = indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {});
                     removeIndex = true;
-                for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
    -                    // don't apply the default mapping, it has been applied when the mapping was created
    -                    indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
    -                }
    +                indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
                 }
     
                 IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
    @@ -213,10 +211,10 @@ public class MetaDataMappingService extends AbstractComponent {
     
     class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
             @Override
-        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
-                                                                        List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
+        public ClusterTasksResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
+                                                                               List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
             Map<Index, MapperService> indexMapperServices = new HashMap<>();
-            BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
+            ClusterTasksResult.Builder<PutMappingClusterStateUpdateRequest> builder = ClusterTasksResult.builder();
                 try {
                     for (PutMappingClusterStateUpdateRequest request : tasks) {
                         try {
    @@ -226,10 +224,7 @@ public class MetaDataMappingService extends AbstractComponent {
                                     MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
                                     indexMapperServices.put(index, mapperService);
                                     // add mappings for all types, we need them for cross-type validation
-                                for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
    -                                    mapperService.merge(mapping.value.type(), mapping.value.source(),
    -                                        MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
    -                                }
    +                                mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
                                 }
                             }
                             currentState = applyRequest(currentState, request, indexMapperServices);
    @@ -313,7 +308,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     if (existingMapper != null) {
                         existingSource = existingMapper.mappingSource();
                     }
    -                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
    +                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                     CompressedXContent updatedSource = mergedMapper.mappingSource();
     
                     if (existingSource != null) {
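
Both executors above now return a `ClusterTasksResult` built by applying every batched task to an evolving cluster state and recording a per-task outcome, so one bad mapping update does not fail the whole batch. A self-contained sketch of that batch-execute shape, with hypothetical stand-in types:

---------------------------------------------------------------------------
import java.util.List;

// Hypothetical sketch of the batch-executor shape: each task sees the state
// produced by the previous one, and failures are recorded per task instead of
// aborting the batch.
class BatchExecutor {
    interface Task<S> {
        S apply(S state) throws Exception;
    }

    static <S> S executeBatch(S state, List<? extends Task<S>> tasks, List<Exception> failures) {
        for (Task<S> task : tasks) {
            try {
                state = task.apply(state);
                failures.add(null);   // success marker for this task
            } catch (Exception e) {
                failures.add(e);      // record the failure, keep going
            }
        }
        return state;
    }
}
---------------------------------------------------------------------------
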
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
    index 6b19e2d4bf3..6bee999a9e5 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
    @@ -24,6 +24,7 @@ import org.elasticsearch.Version;
     import org.elasticsearch.action.ActionListener;
     import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
     import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
    +import org.elasticsearch.action.support.ContextPreservingActionListener;
     import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
     import org.elasticsearch.cluster.ClusterChangedEvent;
     import org.elasticsearch.cluster.ClusterState;
    @@ -44,6 +45,7 @@ import org.elasticsearch.common.settings.Settings;
     import org.elasticsearch.common.unit.TimeValue;
     import org.elasticsearch.index.Index;
     import org.elasticsearch.indices.IndicesService;
    +import org.elasticsearch.threadpool.ThreadPool;
     
     import java.io.IOException;
     import java.util.ArrayList;
    @@ -65,12 +67,14 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
     
         private final IndexScopedSettings indexScopedSettings;
         private final IndicesService indicesService;
    +    private final ThreadPool threadPool;
     
         @Inject
         public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService,
    -                                         IndexScopedSettings indexScopedSettings, IndicesService indicesService) {
    +                                         IndexScopedSettings indexScopedSettings, IndicesService indicesService, ThreadPool threadPool) {
             super(settings);
             this.clusterService = clusterService;
    +        this.threadPool = threadPool;
             this.clusterService.addListener(this);
             this.allocationService = allocationService;
             this.indexScopedSettings = indexScopedSettings;
    @@ -180,7 +184,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
             final boolean preserveExisting = request.isPreserveExisting();
     
             clusterService.submitStateUpdateTask("update-settings",
-                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
+                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {
     
                 @Override
                 protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
    @@ -214,7 +218,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
                                 closeIndices
                         ));
                     }
    -                if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) {
    +                if (!skippedSettigns.isEmpty() && !openIndices.isEmpty()) {
                         throw new IllegalArgumentException(String.format(Locale.ROOT,
                                 "Can't update non dynamic settings [%s] for open indices %s",
                                 skippedSettigns.getAsMap().keySet(),
    @@ -290,6 +294,10 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
             });
         }
     
+    private ContextPreservingActionListener<ClusterStateUpdateResponse> wrapPreservingContext(ActionListener<ClusterStateUpdateResponse> listener) {
    +        return new ContextPreservingActionListener<>(threadPool.getThreadContext().newRestorableContext(false), listener);
    +    }
    +
         /**
          * Updates the cluster block only iff the setting exists in the given settings
          */
    @@ -308,9 +316,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
     
     
     public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    -
    -
-        clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
+        clusterService.submitStateUpdateTask("update-index-compatibility-versions",
+            new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {
     
                 @Override
                 protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
    index 2dc842ceaae..67909bff614 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
    @@ -21,6 +21,9 @@ package org.elasticsearch.cluster.metadata;
     
     import org.elasticsearch.ElasticsearchParseException;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.AbstractNamedDiffable;
    +import org.elasticsearch.cluster.ClusterState;
    +import org.elasticsearch.cluster.NamedDiff;
     import org.elasticsearch.cluster.metadata.MetaData.Custom;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
    @@ -39,12 +42,10 @@ import java.util.List;
     /**
      * Contains metadata about registered snapshot repositories
      */
-public class RepositoriesMetaData extends AbstractDiffable<Custom> implements MetaData.Custom {
+public class RepositoriesMetaData extends AbstractNamedDiffable<Custom> implements Custom {
     
         public static final String TYPE = "repositories";
     
    -    public static final RepositoriesMetaData PROTO = new RepositoriesMetaData();
    -
     private final List<RepositoryMetaData> repositories;
     
         /**
@@ -100,20 +101,20 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
          * {@inheritDoc}
          */
         @Override
    -    public String type() {
    +    public String getWriteableName() {
             return TYPE;
         }
     
    -    /**
    -     * {@inheritDoc}
    -     */
    -    @Override
    -    public Custom readFrom(StreamInput in) throws IOException {
    +    public RepositoriesMetaData(StreamInput in) throws IOException {
             RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()];
             for (int i = 0; i < repository.length; i++) {
    -            repository[i] = RepositoryMetaData.readFrom(in);
    +            repository[i] = new RepositoryMetaData(in);
             }
    -        return new RepositoriesMetaData(repository);
    +        this.repositories = Arrays.asList(repository);
    +    }
    +
+    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(Custom.class, TYPE, in);
         }
     
         /**
@@ -127,11 +128,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
             }
         }
     
    -    /**
    -     * {@inheritDoc}
    -     */
    -    @Override
    -    public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException {
    +    public static RepositoriesMetaData fromXContent(XContentParser parser) throws IOException {
             XContentParser.Token token;
         List<RepositoryMetaData> repository = new ArrayList<>();
             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
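
RepositoriesMetaData is representative of the broader refactoring in this change: the `PROTO` instance with its instance-level `readFrom`/`fromXContent` gives way to a `StreamInput` constructor plus static factories. A minimal sketch of why the constructor form is preferable, with a hypothetical `ByteInput` standing in for `StreamInput`:

---------------------------------------------------------------------------
// Hypothetical sketch: deserialization as a constructor lets fields be final
// and removes the need for a shared prototype instance.
interface ByteInput {
    String readString();

    int readVInt();
}

class RepoMeta {
    final String name;

    RepoMeta(ByteInput in) {
        this.name = in.readString();
    }

    // The old PROTO style needed a throwaway instance just to reach a reader:
    //     RepoMeta readFrom(ByteInput in) { ... }   // called as PROTO.readFrom(in)
}
---------------------------------------------------------------------------
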
    diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java
    index 3c13a10c1cf..847db915b8b 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java
    @@ -73,17 +73,10 @@ public class RepositoryMetaData {
         }
     
     
    -    /**
    -     * Reads repository metadata from stream input
    -     *
    -     * @param in stream input
    -     * @return repository metadata
    -     */
    -    public static RepositoryMetaData readFrom(StreamInput in) throws IOException {
    -        String name = in.readString();
    -        String type = in.readString();
    -        Settings settings = Settings.readSettingsFromStream(in);
    -        return new RepositoryMetaData(name, type, settings);
    +    public RepositoryMetaData(StreamInput in) throws IOException {
    +        name = in.readString();
    +        type = in.readString();
    +        settings = Settings.readSettingsFromStream(in);
         }
     
         /**
    diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
    index a8290d317e8..c81161f1deb 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
    @@ -202,7 +202,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
                 roles.add(DiscoveryNode.Role.DATA);
             }
     
    -        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress,attributes, roles, Version.CURRENT);
    +        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT);
         }
     
         /**
    diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
    index fad86caa7cc..3fcfdc08722 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
    @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.node;
     
     import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.Strings;
    +import org.elasticsearch.common.network.InetAddresses;
     import org.elasticsearch.common.network.NetworkAddress;
     import org.elasticsearch.common.regex.Regex;
     import org.elasticsearch.common.settings.Settings;
    @@ -28,6 +29,7 @@ import org.elasticsearch.common.transport.TransportAddress;
     
     import java.util.HashMap;
     import java.util.Map;
    +import java.util.function.Consumer;
     
     public class DiscoveryNodeFilters {
     
    @@ -36,6 +38,25 @@ public class DiscoveryNodeFilters {
             OR
         }
     
    +    /**
    +     * Validates the IP addresses in a group of {@link Settings} by looking for the keys
    +     * "_ip", "_host_ip", and "_publish_ip" and ensuring each of their comma separated values
    +     * is a valid IP address.
    +     */
+    public static final Consumer<Settings> IP_VALIDATOR = (settings) -> {
+        Map<String, String> settingsMap = settings.getAsMap();
+        for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
    +            String propertyKey = entry.getKey();
    +            if ("_ip".equals(propertyKey) || "_host_ip".equals(propertyKey) || "_publish_ip".equals(propertyKey)) {
    +                for (String value : Strings.tokenizeToStringArray(entry.getValue(), ",")) {
    +                    if (InetAddresses.isInetAddress(value) == false) {
    +                        throw new IllegalArgumentException("invalid IP address [" + value + "] for [" + propertyKey + "]");
    +                    }
    +                }
    +            }
    +        }
    +    };
    +
         public static DiscoveryNodeFilters buildFromSettings(OpType opType, String prefix, Settings settings) {
             return buildFromKeyValue(opType, settings.getByPrefix(prefix).getAsMap());
         }
    @@ -43,7 +64,7 @@ public class DiscoveryNodeFilters {
     public static DiscoveryNodeFilters buildFromKeyValue(OpType opType, Map<String, String> filters) {
         Map<String, String[]> bFilters = new HashMap<>();
         for (Map.Entry<String, String> entry : filters.entrySet()) {
    -            String[] values = Strings.splitStringByCommaToArray(entry.getValue());
    +            String[] values = Strings.tokenizeToStringArray(entry.getValue(), ",");
                 if (values.length > 0) {
                     bFilters.put(entry.getKey(), values);
                 }
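
Two behavioral changes land in this file: `IP_VALIDATOR` rejects malformed addresses in `_ip`-style filter settings, and filter values are now tokenized rather than split, so whitespace around commas is trimmed and empty entries are dropped. A rough plain-Java approximation of that tokenizing behavior (not the actual `Strings.tokenizeToStringArray` implementation):

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

class Tokenize {
    // Approximation: trim each comma-separated token and drop empty ones,
    // unlike a plain split which keeps surrounding whitespace.
    static List<String> tokenize(String value) {
        List<String> out = new ArrayList<>();
        for (String token : value.split(",")) {
            String trimmed = token.trim();
            if (trimmed.isEmpty() == false) {
                out.add(trimmed);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // " 10.0.0.2" would fail an IP check; the trimmed token passes
        System.out.println(tokenize("10.0.0.1, 10.0.0.2,")); // [10.0.0.1, 10.0.0.2]
    }
}
---------------------------------------------------------------------------
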
    diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
    index 6d80a9573ba..4829c9c9753 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
    @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
     import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
     import org.elasticsearch.Version;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.common.Booleans;
     import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.collect.ImmutableOpenMap;
    @@ -46,7 +47,6 @@ import java.util.Map;
 public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements Iterable<DiscoveryNode> {
     
         public static final DiscoveryNodes EMPTY_NODES = builder().build();
    -    public static final DiscoveryNodes PROTO = EMPTY_NODES;
     
     private final ImmutableOpenMap<String, DiscoveryNode> nodes;
     private final ImmutableOpenMap<String, DiscoveryNode> dataNodes;
@@ -523,7 +523,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
             }
         }
     
    -    private DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
    +    public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
             Builder builder = new Builder();
             if (in.readBoolean()) {
                 builder.masterNodeId(in.readString());
@@ -546,9 +546,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
             return builder.build();
         }
     
    -    @Override
    -    public DiscoveryNodes readFrom(StreamInput in) throws IOException {
    -        return readFrom(in, getLocalNode());
+    public static Diff<DiscoveryNodes> readDiffFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
    +        return AbstractDiffable.readDiffFrom(in1 -> readFrom(in1, localNode), in);
         }
     
         public static Builder builder() {
@@ -678,10 +677,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
                 );
             }
     
    -        public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
    -            return PROTO.readFrom(in, localNode);
    -        }
    -
             public boolean isLocalNodeElectedMaster() {
                 return masterNodeId != null && masterNodeId.equals(localNodeId);
             }
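
DiscoveryNodes cannot expose a plain zero-argument reader because deserialization needs the local node, so the new static `readDiffFrom(StreamInput, DiscoveryNode)` closes over the extra argument with a lambda (`in1 -> readFrom(in1, localNode)`). A sketch of that closed-over-parameter reader pattern, with hypothetical stand-in types:

---------------------------------------------------------------------------
// Hypothetical sketch: a generic reader interface plus a lambda that captures
// the argument a zero-arg reader could not supply.
interface Input {
    String readString();
}

interface Reader<T> {
    T read(Input in);
}

class Node {
    final String id;
    final String localId; // extra context needed at read time

    Node(Input in, String localId) {
        this.id = in.readString();
        this.localId = localId;
    }

    static Reader<Node> readerFor(String localId) {
        return in -> new Node(in, localId); // the lambda captures localId
    }
}
---------------------------------------------------------------------------
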
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java
    index cb0fb487693..9ab91f9f8de 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java
    @@ -21,8 +21,6 @@ package org.elasticsearch.cluster.routing;
     
     import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcher;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.UUIDs;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
    @@ -48,7 +46,7 @@ public class AllocationId implements ToXContent, Writeable {
         private static final String ID_KEY = "id";
         private static final String RELOCATION_ID_KEY = "relocation_id";
     
-    private static final ObjectParser<AllocationId.Builder, ParseFieldMatcherSupplier> ALLOCATION_ID_PARSER = new ObjectParser<>(
+    private static final ObjectParser<AllocationId.Builder, Void> ALLOCATION_ID_PARSER = new ObjectParser<>(
                 "allocationId");
     
         static {
    @@ -203,6 +201,6 @@ public class AllocationId implements ToXContent, Writeable {
         }
     
         public static AllocationId fromXContent(XContentParser parser) throws IOException {
    -        return ALLOCATION_ID_PARSER.parse(parser, new AllocationId.Builder(), () -> ParseFieldMatcher.STRICT).build();
    +        return ALLOCATION_ID_PARSER.parse(parser, new AllocationId.Builder(), null).build();
         }
     }
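
With `ParseFieldMatcher` gone, the parser's context type parameter becomes `Void` and callers simply pass `null`, as `fromXContent` above now does. For the declare-then-parse style that `ObjectParser` provides, here is a simplified self-contained stand-in (not the Elasticsearch implementation):

---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;

// Hypothetical sketch: field handlers are declared once, then parsing walks
// the input and routes each known field to its setter on the builder.
class MiniParser<B> {
    private final Map<String, BiConsumer<B, String>> fields = new HashMap<>();

    void declareString(String name, BiConsumer<B, String> setter) {
        fields.put(name, setter);
    }

    B parse(Map<String, String> source, B builder) {
        for (Map.Entry<String, String> entry : source.entrySet()) {
            BiConsumer<B, String> setter = fields.get(entry.getKey());
            if (setter != null) {
                setter.accept(builder, entry.getValue());
            }
        }
        return builder;
    }
}
---------------------------------------------------------------------------
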
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
    index 58ee6d70f2c..53590550d84 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
    @@ -25,6 +25,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor;
     import org.apache.lucene.util.CollectionUtil;
     import org.elasticsearch.Version;
     import org.elasticsearch.cluster.AbstractDiffable;
    +import org.elasticsearch.cluster.Diff;
     import org.elasticsearch.cluster.metadata.IndexMetaData;
     import org.elasticsearch.cluster.metadata.MetaData;
     import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource;
    @@ -63,8 +64,6 @@ import java.util.Set;
      */
 public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> implements Iterable<IndexShardRoutingTable> {
     
    -    public static final IndexRoutingTable PROTO = builder(new Index("", "_na_")).build();
    -
         private final Index index;
         private final ShardShuffler shuffler;
     
@@ -141,7 +140,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
                     }
     
                     if (indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1) &&
    -                    IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) == false && // see #20650
    +                    indexMetaData.isIndexUsingShadowReplicas() == false && // see #20650
                         shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
                         RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false &&
                         inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false)
@@ -319,8 +318,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             return result;
         }
     
    -    @Override
    -    public IndexRoutingTable readFrom(StreamInput in) throws IOException {
    +    public static IndexRoutingTable readFrom(StreamInput in) throws IOException {
             Index index = new Index(in);
             Builder builder = new Builder(index);
     
@@ -332,6 +330,10 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             return builder.build();
         }
     
+    public static Diff<IndexRoutingTable> readDiffFrom(StreamInput in) throws IOException {
    +        return readDiffFrom(IndexRoutingTable::readFrom, in);
    +    }
    +
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             index.writeTo(out);
@@ -354,17 +356,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
                 this.index = index;
             }
     
    -        /**
    -         * Reads an {@link IndexRoutingTable} from an {@link StreamInput}
    -         *
    -         * @param in {@link StreamInput} to read the {@link IndexRoutingTable} from
    -         * @return {@link IndexRoutingTable} read
    -         * @throws IOException if something happens during read
    -         */
    -        public static IndexRoutingTable readFrom(StreamInput in) throws IOException {
    -            return PROTO.readFrom(in);
    -        }
    -
             /**
              * Initializes a new empty index, as if it was created from an API.
              */
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
    index 94cb4b8c8e8..6881cc75657 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
    @@ -92,13 +92,10 @@ public class OperationRouting extends AbstractComponent {
                 final Set<String> effectiveRouting = routing.get(index);
                 if (effectiveRouting != null) {
                     for (String r : effectiveRouting) {
    -                    int shardId = generateShardId(indexMetaData, null, r);
    -                    IndexShardRoutingTable indexShard = indexRouting.shard(shardId);
    -                    if (indexShard == null) {
    -                        throw new ShardNotFoundException(new ShardId(indexRouting.getIndex(), shardId));
    +                    final int routingPartitionSize = indexMetaData.getRoutingPartitionSize();
    +                    for (int partitionOffset = 0; partitionOffset < routingPartitionSize; partitionOffset++) {
    +                        set.add(shardRoutingTable(indexRouting, calculateScaledShardId(indexMetaData, r, partitionOffset)));
                         }
    -                    // we might get duplicates, but that's ok, they will override one another
    -                    set.add(indexShard);
                     }
                 } else {
                     for (IndexShardRoutingTable indexShard : indexRouting) {
    @@ -187,6 +184,14 @@ public class OperationRouting extends AbstractComponent {
             }
         }
     
    +    private IndexShardRoutingTable shardRoutingTable(IndexRoutingTable indexRouting, int shardId) {
    +        IndexShardRoutingTable indexShard = indexRouting.shard(shardId);
    +        if (indexShard == null) {
    +            throw new ShardNotFoundException(new ShardId(indexRouting.getIndex(), shardId));
    +        }
    +        return indexShard;
    +    }
    +
         protected IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) {
             IndexRoutingTable indexRouting = clusterState.routingTable().index(index);
             if (indexRouting == null) {
    @@ -213,15 +218,33 @@ public class OperationRouting extends AbstractComponent {
             return new ShardId(indexMetaData.getIndex(), generateShardId(indexMetaData, id, routing));
         }
     
    -    static int generateShardId(IndexMetaData indexMetaData, String id, @Nullable String routing) {
    -        final int hash;
    +    static int generateShardId(IndexMetaData indexMetaData, @Nullable String id, @Nullable String routing) {
    +        final String effectiveRouting;
    +        final int partitionOffset;
    +
             if (routing == null) {
    -            hash = Murmur3HashFunction.hash(id);
    +            assert(indexMetaData.isRoutingPartitionedIndex() == false) : "A routing value is required for gets from a partitioned index";
    +            effectiveRouting = id;
             } else {
    -            hash = Murmur3HashFunction.hash(routing);
    +            effectiveRouting = routing;
             }
    +
    +        if (indexMetaData.isRoutingPartitionedIndex()) {
    +            partitionOffset = Math.floorMod(Murmur3HashFunction.hash(id), indexMetaData.getRoutingPartitionSize());
    +        } else {
+            // we would still have got 0 above, but this check saves us an unnecessary hash calculation
    +            partitionOffset = 0;
    +        }
    +
    +        return calculateScaledShardId(indexMetaData, effectiveRouting, partitionOffset);
    +    }
    +
    +    private static int calculateScaledShardId(IndexMetaData indexMetaData, String effectiveRouting, int partitionOffset) {
    +        final int hash = Murmur3HashFunction.hash(effectiveRouting) + partitionOffset;
    +
             // we don't use IMD#getNumberOfShards since the index might have been shrunk such that we need to use the size
             // of original index to hash documents
             return Math.floorMod(hash, indexMetaData.getRoutingNumShards()) / indexMetaData.getRoutingFactor();
         }
    +
     }
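
The routing change above is the heart of partitioned indices: the routing value picks a window of `index.routing_partition_size` adjacent shard slots, and the document id picks the offset inside that window, so documents sharing a routing value spread over a bounded set of shards instead of exactly one. A self-contained sketch of the arithmetic, with `String.hashCode()` standing in for the Murmur3 hash used above:

---------------------------------------------------------------------------
class RoutingMath {
    // Mirrors the shape of generateShardId/calculateScaledShardId above; the
    // routing factor division supports shrunken indices.
    static int shardId(String routing, String id, int partitionSize, int routingNumShards, int routingFactor) {
        int partitionOffset = partitionSize > 1
                ? Math.floorMod(id.hashCode(), partitionSize) // id selects the offset in the window
                : 0;                                          // unpartitioned: plain routing hash
        int hash = routing.hashCode() + partitionOffset;      // routing selects the window
        return Math.floorMod(hash, routingNumShards) / routingFactor;
    }

    public static void main(String[] args) {
        // e.g. 8 shards, partition size 4: all ids routed by "user-1" land in a
        // window of 4 of the 8 shards rather than on a single shard
        for (String id : new String[] {"doc-1", "doc-2", "doc-3"}) {
            System.out.println(shardId("user-1", id, 4, 8, 1));
        }
    }
}
---------------------------------------------------------------------------
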
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
    index 6b0b1324717..45d567b657e 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
@@ -540,7 +540,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                     if (failedShard.primary()) {
                         // promote active replica to primary if active replica exists (only the case for shadow replicas)
                         ShardRouting activeReplica = activeReplica(failedShard.shardId());
    -                    assert activeReplica == null || IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) :
    +                    assert activeReplica == null || indexMetaData.isIndexUsingShadowReplicas() :
                             "initializing primary [" + failedShard + "] with active replicas [" + activeReplica + "] only expected when " +
                                 "using shadow replicas";
                         if (activeReplica == null) {
    @@ -599,7 +599,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
             ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica);
             routingChangesObserver.replicaPromoted(activeReplica);
    -        if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings())) {
    +        if (indexMetaData.isIndexUsingShadowReplicas()) {
                 ShardRouting initializedShard = reinitShadowPrimary(primarySwappedCandidate);
                 routingChangesObserver.startedPrimaryReinitialized(primarySwappedCandidate, initializedShard);
             }
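
The two call sites above swap the static helper IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) for an instance method, so the flag is resolved from the index settings once rather than re-parsed at every call. A rough sketch of that shape, with simplified stand-in types (the setting key is an assumption for illustration):

import java.util.Map;

final class IndexMetaDataSketch {

    private final boolean shadowReplicas;

    IndexMetaDataSketch(Map<String, String> settings) {
        // Assumed key; the real class resolves this from its Settings when constructed.
        this.shadowReplicas = Boolean.parseBoolean(settings.getOrDefault("index.shadow_replicas", "false"));
    }

    // Callers such as RoutingNodes can now ask the metadata directly.
    boolean isIndexUsingShadowReplicas() {
        return shadowReplicas;
    }
}
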
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
    index 051fd12a12b..0b1a0044567 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
    @@ -56,8 +56,6 @@ import java.util.function.Predicate;
      */
     public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
     
    -    public static RoutingTable PROTO = builder().build();
    -
         public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build();
     
         private final long version;
    @@ -349,18 +347,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
     
    -    @Override
    -    public Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException {
    +    public static Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException {
             return new RoutingTableDiff(in);
         }
     
    -    @Override
    -    public RoutingTable readFrom(StreamInput in) throws IOException {
    +    public static RoutingTable readFrom(StreamInput in) throws IOException {
             Builder builder = new Builder();
             builder.version = in.readLong();
             int size = in.readVInt();
             for (int i = 0; i < size; i++) {
    -            IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
    +            IndexRoutingTable index = IndexRoutingTable.readFrom(in);
                 builder.add(index);
             }
     
    @@ -382,14 +378,15 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
     
             private final Diff<ImmutableOpenMap<String, IndexRoutingTable>> indicesRouting;
     
    -        public RoutingTableDiff(RoutingTable before, RoutingTable after) {
    +        RoutingTableDiff(RoutingTable before, RoutingTable after) {
                 version = after.version;
                 indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
             }
     
    -        public RoutingTableDiff(StreamInput in) throws IOException {
    +        RoutingTableDiff(StreamInput in) throws IOException {
                 version = in.readLong();
    -            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
    +            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable::readFrom,
    +                IndexRoutingTable::readDiffFrom);
             }
     
             @Override
    @@ -607,10 +604,6 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java
    @@ ... @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeable {
         public static void nodeDecisionsToXContent(List<NodeAllocationResult> nodeDecisions, XContentBuilder builder, Params params)
             throws IOException {
     
    -        if (nodeDecisions != null) {
    +        if (nodeDecisions != null && nodeDecisions.isEmpty() == false) {
                 builder.startArray("node_allocation_decisions");
                 {
                     for (NodeAllocationResult explanation : nodeDecisions) {
    @@ -166,4 +167,21 @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeabl
             return false;
         }
     
    +    @Override
    +    public boolean equals(Object other) {
    +        if (this == other) {
    +            return true;
    +        }
    +        if (other == null || other instanceof AbstractAllocationDecision == false) {
    +            return false;
    +        }
    +        @SuppressWarnings("unchecked") AbstractAllocationDecision that = (AbstractAllocationDecision) other;
    +        return Objects.equals(targetNode, that.targetNode) && Objects.equals(nodeDecisions, that.nodeDecisions);
    +    }
    +
    +    @Override
    +    public int hashCode() {
    +        return Objects.hash(targetNode, nodeDecisions);
    +    }
    +
     }
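
The subclasses below (AllocateUnassignedDecision, MoveDecision) build on this equals/hashCode by checking the base fields first via super.equals and only then their own. A small self-contained sketch of that layering, with hypothetical types:

import java.util.Objects;

class BaseDecision {
    final String targetNode;

    BaseDecision(String targetNode) {
        this.targetNode = targetNode;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other instanceof BaseDecision == false) {
            return false;
        }
        return Objects.equals(targetNode, ((BaseDecision) other).targetNode);
    }

    @Override
    public int hashCode() {
        return Objects.hash(targetNode);
    }
}

class RankedDecision extends BaseDecision {
    final int ranking;

    RankedDecision(String targetNode, int ranking) {
        super(targetNode);
        this.ranking = ranking;
    }

    @Override
    public boolean equals(Object other) {
        if (super.equals(other) == false) {   // base fields first
            return false;
        }
        if (other instanceof RankedDecision == false) {   // then the exact subtype
            return false;
        }
        return ranking == ((RankedDecision) other).ranking;
    }

    @Override
    public int hashCode() {
        return 31 * super.hashCode() + Integer.hashCode(ranking);
    }

    public static void main(String[] args) {
        RankedDecision a = new RankedDecision("node-1", 3);
        RankedDecision b = new RankedDecision("node-1", 3);
        System.out.println(a.equals(b)); // true: base and subclass fields both match
    }
}
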
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java
    index 1100d4d70d3..49b9604e345 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java
    @@ -174,7 +174,7 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
     
         @Override
         public boolean isDecisionTaken() {
    -        return this != NOT_TAKEN;
    +        return allocationStatus != AllocationStatus.NO_ATTEMPT;
         }
     
         /**
    @@ -238,35 +238,41 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
         @Override
         public String getExplanation() {
             checkDecisionState();
    -        String explanation;
    -        if (allocationStatus == null) {
    -            explanation = "can allocate the shard";
    -        } else if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) {
    -            explanation = "allocation temporarily throttled";
    -        } else if (allocationStatus == AllocationStatus.FETCHING_SHARD_DATA) {
    -            explanation = "cannot allocate because information about existing shard data is still being retrieved from " +
    -                              "some of the nodes";
    -        } else if (allocationStatus == AllocationStatus.NO_VALID_SHARD_COPY) {
    -            if (getNodeDecisions() != null && getNodeDecisions().size() > 0) {
    -                explanation = "cannot allocate because all existing copies of the shard are unreadable";
    +        AllocationDecision allocationDecision = getAllocationDecision();
    +        if (allocationDecision == AllocationDecision.YES) {
    +            return "can allocate the shard";
    +        } else if (allocationDecision == AllocationDecision.THROTTLED) {
    +            return "allocation temporarily throttled";
    +        } else if (allocationDecision == AllocationDecision.AWAITING_INFO) {
    +            return "cannot allocate because information about existing shard data is still being retrieved from some of the nodes";
    +        } else if (allocationDecision == AllocationDecision.NO_VALID_SHARD_COPY) {
    +            if (hasNodeWithStaleOrCorruptShard()) {
    +                return "cannot allocate because all found copies of the shard are either stale or corrupt";
                 } else {
    -                explanation = "cannot allocate because a previous copy of the shard existed but could not be found";
    +                return "cannot allocate because a previous copy of the primary shard existed but can no longer be found on " +
    +                       "the nodes in the cluster";
                 }
    -        } else if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) {
    -            explanation = "cannot allocate because the cluster is still waiting " +
    +        } else if (allocationDecision == AllocationDecision.ALLOCATION_DELAYED) {
    +            return "cannot allocate because the cluster is still waiting " +
                                   TimeValue.timeValueMillis(remainingDelayInMillis) +
                                   " for the departed node holding a replica to rejoin" +
                                   (atLeastOneNodeWithYesDecision() ?
                                        ", despite being allowed to allocate the shard to at least one other node" : "");
             } else {
    -            assert allocationStatus == AllocationStatus.DECIDERS_NO;
    +            assert allocationDecision == AllocationDecision.NO;
                 if (reuseStore) {
    -                explanation = "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy";
    +                return "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy";
                 } else {
    -                explanation = "cannot allocate because allocation is not permitted to any of the nodes";
    +                return "cannot allocate because allocation is not permitted to any of the nodes";
                 }
             }
    -        return explanation;
    +    }
    +
    +    private boolean hasNodeWithStaleOrCorruptShard() {
    +        return getNodeDecisions() != null && getNodeDecisions().stream().anyMatch(result ->
    +                result.getShardStoreInfo() != null
    +                    && (result.getShardStoreInfo().getAllocationId() != null
    +                            || result.getShardStoreInfo().getStoreException() != null));
         }
     
         @Override
    @@ -300,4 +306,26 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
             out.writeVLong(configuredDelayInMillis);
         }
     
    +    @Override
    +    public boolean equals(Object other) {
    +        if (super.equals(other) == false) {
    +            return false;
    +        }
    +        if (other instanceof AllocateUnassignedDecision == false) {
    +            return false;
    +        }
    +        @SuppressWarnings("unchecked") AllocateUnassignedDecision that = (AllocateUnassignedDecision) other;
    +        return Objects.equals(allocationStatus, that.allocationStatus)
    +                   && Objects.equals(allocationId, that.allocationId)
    +                   && reuseStore == that.reuseStore
    +                   && configuredDelayInMillis == that.configuredDelayInMillis
    +                   && remainingDelayInMillis == that.remainingDelayInMillis;
    +    }
    +
    +    @Override
    +    public int hashCode() {
    +        return 31 * super.hashCode() + Objects.hash(allocationStatus, allocationId, reuseStore,
    +            configuredDelayInMillis, remainingDelayInMillis);
    +    }
    +
     }
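
One reason the isDecisionTaken() change above (from this != NOT_TAKEN to a field check) matters: deserialization produces a fresh object, so an identity comparison against a shared NOT_TAKEN constant would misreport a decision that crossed the wire. A self-contained sketch of that failure mode, using simplified stand-ins for the real types and stream classes:

final class DecisionSketch {

    enum Status { NO_ATTEMPT, DECIDERS_NO }

    static final DecisionSketch NOT_TAKEN = new DecisionSketch(Status.NO_ATTEMPT);

    final Status allocationStatus;

    DecisionSketch(Status allocationStatus) {
        this.allocationStatus = allocationStatus;
    }

    // Simulates a StreamOutput/StreamInput round trip: singleton identity is lost.
    static DecisionSketch roundTrip(DecisionSketch decision) {
        return new DecisionSketch(decision.allocationStatus);
    }

    boolean takenByIdentity() { return this != NOT_TAKEN; }                     // old check
    boolean takenByStatus() { return allocationStatus != Status.NO_ATTEMPT; }   // new check

    public static void main(String[] args) {
        DecisionSketch copy = roundTrip(NOT_TAKEN);
        System.out.println(copy.takenByIdentity()); // true  -- wrongly reports a decision
        System.out.println(copy.takenByStatus());   // false -- correct
    }
}
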
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java
    index 5a49cfb142e..0fe9549635e 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java
    @@ -40,7 +40,7 @@ public enum AllocationDecision implements Writeable {
         /**
          * The allocation attempt was throttled for the shard.
          */
    -    THROTTLE((byte) 1),
    +    THROTTLED((byte) 1),
         /**
          * The shard cannot be allocated, which can happen for any number of reasons,
          * including the allocation deciders gave a NO decision for allocating.
    @@ -56,12 +56,12 @@ public enum AllocationDecision implements Writeable {
          * Waiting on getting shard data from all nodes before making a decision
          * about where to allocate the shard.
          */
    -    FETCH_PENDING((byte) 4),
    +    AWAITING_INFO((byte) 4),
         /**
          * The allocation decision has been delayed waiting for a replica with a shard copy
          * that left the cluster to rejoin.
          */
    -    DELAYED_ALLOCATION((byte) 5),
    +    ALLOCATION_DELAYED((byte) 5),
         /**
          * The shard was denied allocation because there were no valid shard copies
          * found for it amongst the nodes in the cluster.
    @@ -90,15 +90,15 @@ public enum AllocationDecision implements Writeable {
                 case 0:
                     return YES;
                 case 1:
    -                return THROTTLE;
    +                return THROTTLED;
                 case 2:
                     return NO;
                 case 3:
                     return WORSE_BALANCE;
                 case 4:
    -                return FETCH_PENDING;
    +                return AWAITING_INFO;
                 case 5:
    -                return DELAYED_ALLOCATION;
    +                return ALLOCATION_DELAYED;
                 case 6:
                     return NO_VALID_SHARD_COPY;
                 case 7:
    @@ -117,11 +117,11 @@ public enum AllocationDecision implements Writeable {
             } else {
                 switch (allocationStatus) {
                     case DECIDERS_THROTTLED:
    -                    return THROTTLE;
    +                    return THROTTLED;
                     case FETCHING_SHARD_DATA:
    -                    return FETCH_PENDING;
    +                    return AWAITING_INFO;
                     case DELAYED_ALLOCATION:
    -                    return DELAYED_ALLOCATION;
    +                    return ALLOCATION_DELAYED;
                     case NO_VALID_SHARD_COPY:
                         return NO_VALID_SHARD_COPY;
                     case NO_ATTEMPT:
    @@ -141,7 +141,7 @@ public enum AllocationDecision implements Writeable {
                 case YES:
                     return YES;
                 case THROTTLE:
    -                return THROTTLE;
    +                return THROTTLED;
                 default:
                     assert type == Decision.Type.NO;
                     return NO;
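
Note that THROTTLE to THROTTLED and the other renames above keep their byte ids, so the change is wire-compatible: only the id is serialized, never the constant name. A compact, self-contained sketch of that id-based round trip (simplified; the real code goes through StreamInput/StreamOutput):

enum AllocationDecisionSketch {
    YES((byte) 0),
    THROTTLED((byte) 1),  // was THROTTLE; same id, new name
    NO((byte) 2);

    private final byte id;

    AllocationDecisionSketch(byte id) {
        this.id = id;
    }

    byte id() {
        return id;
    }

    static AllocationDecisionSketch readFrom(byte id) {
        switch (id) {
            case 0: return YES;
            case 1: return THROTTLED;
            case 2: return NO;
            default: throw new IllegalArgumentException("unknown id [" + id + "]");
        }
    }

    public static void main(String[] args) {
        byte onTheWire = THROTTLED.id(); // an old node would have written the same byte for THROTTLE
        System.out.println(readFrom(onTheWire)); // THROTTLED
    }
}
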
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java
    index 676e6107cb2..de9795ff4c2 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java
    @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
     
     import java.io.IOException;
     import java.util.List;
    +import java.util.Objects;
     
     /**
      * Represents a decision to move a started shard, either because it is no longer allowed to remain on its current node
    @@ -48,15 +49,15 @@ public final class MoveDecision extends AbstractAllocationDecision {
         @Nullable
         private final Decision canRemainDecision;
         @Nullable
    -    private final Decision canRebalanceDecision;
    +    private final Decision clusterRebalanceDecision;
         private final int currentNodeRanking;
     
    -    private MoveDecision(Decision canRemainDecision, Decision canRebalanceDecision, AllocationDecision allocationDecision,
    +    private MoveDecision(Decision canRemainDecision, Decision clusterRebalanceDecision, AllocationDecision allocationDecision,
                              DiscoveryNode assignedNode, List<NodeAllocationResult> nodeDecisions, int currentNodeRanking) {
             super(assignedNode, nodeDecisions);
             this.allocationDecision = allocationDecision;
             this.canRemainDecision = canRemainDecision;
    -        this.canRebalanceDecision = canRebalanceDecision;
    +        this.clusterRebalanceDecision = clusterRebalanceDecision;
             this.currentNodeRanking = currentNodeRanking;
         }
     
    @@ -64,7 +65,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
             super(in);
             allocationDecision = in.readOptionalWriteable(AllocationDecision::readFrom);
             canRemainDecision = in.readOptionalWriteable(Decision::readFrom);
    -        canRebalanceDecision = in.readOptionalWriteable(Decision::readFrom);
    +        clusterRebalanceDecision = in.readOptionalWriteable(Decision::readFrom);
             currentNodeRanking = in.readVInt();
         }
     
    @@ -73,7 +74,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
             super.writeTo(out);
             out.writeOptionalWriteable(allocationDecision);
             out.writeOptionalWriteable(canRemainDecision);
    -        out.writeOptionalWriteable(canRebalanceDecision);
    +        out.writeOptionalWriteable(clusterRebalanceDecision);
             out.writeVInt(currentNodeRanking);
         }
     
    @@ -131,7 +132,15 @@ public final class MoveDecision extends AbstractAllocationDecision {
     
         @Override
         public boolean isDecisionTaken() {
    -        return this != NOT_TAKEN;
    +        return canRemainDecision != null || clusterRebalanceDecision != null;
    +    }
    +
    +    /**
    +     * Creates a new move decision from this decision, plus adding a remain decision.
    +     */
    +    public MoveDecision withRemainDecision(Decision canRemainDecision) {
    +        return new MoveDecision(canRemainDecision, clusterRebalanceDecision, allocationDecision,
    +                                   targetNode, nodeDecisions, currentNodeRanking);
         }
     
         /**
    @@ -164,13 +173,13 @@ public final class MoveDecision extends AbstractAllocationDecision {
     
         /**
          * Returns {@code true} if the shard is allowed to be rebalanced to another node in the cluster,
    -     * returns {@code false} otherwise.  If {@link #getCanRebalanceDecision()} returns {@code null}, then
    +     * returns {@code false} otherwise.  If {@link #getClusterRebalanceDecision()} returns {@code null}, then
          * the result of this method is meaningless, as no rebalance decision was taken.  If {@link #isDecisionTaken()}
          * returns {@code false}, then invoking this method will throw an {@code IllegalStateException}.
          */
    -    public boolean canRebalance() {
    +    public boolean canRebalanceCluster() {
             checkDecisionState();
    -        return canRebalanceDecision.type() == Type.YES;
    +        return clusterRebalanceDecision != null && clusterRebalanceDecision.type() == Type.YES;
         }
     
         /**
    @@ -182,9 +191,9 @@ public final class MoveDecision extends AbstractAllocationDecision {
          * {@code IllegalStateException}.
          */
         @Nullable
    -    public Decision getCanRebalanceDecision() {
    +    public Decision getClusterRebalanceDecision() {
             checkDecisionState();
    -        return canRebalanceDecision;
    +        return clusterRebalanceDecision;
         }
     
         /**
    @@ -199,7 +208,7 @@ public final class MoveDecision extends AbstractAllocationDecision {
         /**
          * Gets the current ranking of the node to which the shard is currently assigned, relative to the
          * other nodes in the cluster as reported in {@link NodeAllocationResult#getWeightRanking()}.  The
    -     * ranking will only return a meaningful positive integer if {@link #getCanRebalanceDecision()} returns
    +     * ranking will only return a meaningful positive integer if {@link #getClusterRebalanceDecision()} returns
          * a non-null value; otherwise, 0 will be returned.  If {@link #isDecisionTaken()} returns
          * {@code false}, then invoking this method will throw an {@code IllegalStateException}.
          */
    @@ -212,18 +221,19 @@ public final class MoveDecision extends AbstractAllocationDecision {
         public String getExplanation() {
             checkDecisionState();
             String explanation;
    -        if (canRebalanceDecision != null) {
    +        if (clusterRebalanceDecision != null) {
                 // it was a decision to rebalance the shard, because the shard was allowed to remain on its current node
    -            if (allocationDecision == AllocationDecision.FETCH_PENDING) {
    +            if (allocationDecision == AllocationDecision.AWAITING_INFO) {
                     explanation = "cannot rebalance as information about existing copies of this shard in the cluster is still being gathered";
    -            } else if (canRebalanceDecision.type() == Type.NO) {
    -                explanation = "rebalancing is not allowed on the cluster" + (atLeastOneNodeWithYesDecision() ? ", even though there " +
    +            } else if (clusterRebalanceDecision.type() == Type.NO) {
    +                explanation = "rebalancing is not allowed" + (atLeastOneNodeWithYesDecision() ? ", even though there " +
                                   "is at least one node on which the shard can be allocated" : "");
    -            } else if (canRebalanceDecision.type() == Type.THROTTLE) {
    +            } else if (clusterRebalanceDecision.type() == Type.THROTTLE) {
                     explanation = "rebalancing is throttled";
                 } else {
    +                assert clusterRebalanceDecision.type() == Type.YES;
                     if (getTargetNode() != null) {
    -                    if (allocationDecision == AllocationDecision.THROTTLE) {
    +                    if (allocationDecision == AllocationDecision.THROTTLED) {
                             explanation = "shard rebalancing throttled";
                         } else {
                             explanation = "can rebalance shard";
    @@ -235,11 +245,10 @@ public final class MoveDecision extends AbstractAllocationDecision {
                 }
             } else {
                 // it was a decision to force move the shard
    -            if (canRemain()) {
    -                explanation = "shard can remain on its current node";
    -            } else if (allocationDecision == AllocationDecision.YES) {
    +            assert canRemain() == false;
    +            if (allocationDecision == AllocationDecision.YES) {
                     explanation = "shard cannot remain on this node and is force-moved to another node";
    -            } else if (allocationDecision == AllocationDecision.THROTTLE) {
    +            } else if (allocationDecision == AllocationDecision.THROTTLED) {
                     explanation = "shard cannot remain on this node but is throttled on moving to another node";
                 } else {
                     assert allocationDecision == AllocationDecision.NO;
    @@ -263,23 +272,44 @@ public final class MoveDecision extends AbstractAllocationDecision {
                 canRemainDecision.toXContent(builder, params);
                 builder.endArray();
             }
    -        if (canRebalanceDecision != null) {
    -            AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(canRebalanceDecision.type());
    +        if (clusterRebalanceDecision != null) {
    +            AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type());
                 builder.field("can_rebalance_cluster", rebalanceDecision);
    -            if (rebalanceDecision != AllocationDecision.YES && canRebalanceDecision.getDecisions().isEmpty() == false) {
    +            if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) {
                     builder.startArray("can_rebalance_cluster_decisions");
    -                canRebalanceDecision.toXContent(builder, params);
    +                clusterRebalanceDecision.toXContent(builder, params);
                     builder.endArray();
                 }
             }
    -        if (canRebalanceDecision != null) {
    +        if (clusterRebalanceDecision != null) {
                 builder.field("can_rebalance_to_other_node", allocationDecision);
    +            builder.field("rebalance_explanation", getExplanation());
             } else {
                 builder.field("can_move_to_other_node", forceMove() ? "yes" : "no");
    +            builder.field("move_explanation", getExplanation());
             }
    -        builder.field(canRebalanceDecision != null ? "rebalance_explanation" : "move_explanation", getExplanation());
             nodeDecisionsToXContent(nodeDecisions, builder, params);
             return builder;
         }
     
    +    @Override
    +    public boolean equals(Object other) {
    +        if (super.equals(other) == false) {
    +            return false;
    +        }
    +        if (other instanceof MoveDecision == false) {
    +            return false;
    +        }
    +        @SuppressWarnings("unchecked") MoveDecision that = (MoveDecision) other;
    +        return Objects.equals(allocationDecision, that.allocationDecision)
    +                   && Objects.equals(canRemainDecision, that.canRemainDecision)
    +                   && Objects.equals(clusterRebalanceDecision, that.clusterRebalanceDecision)
    +                   && currentNodeRanking == that.currentNodeRanking;
    +    }
    +
    +    @Override
    +    public int hashCode() {
    +        return 31 * super.hashCode() + Objects.hash(allocationDecision, canRemainDecision, clusterRebalanceDecision, currentNodeRanking);
    +    }
    +
     }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
    index 701bb66f8d9..3740ded3060 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
    @@ -20,6 +20,7 @@
     package org.elasticsearch.cluster.routing.allocation;
     
     import org.elasticsearch.ElasticsearchException;
    +import org.elasticsearch.Version;
     import org.elasticsearch.cluster.node.DiscoveryNode;
     import org.elasticsearch.cluster.routing.allocation.decider.Decision;
     import org.elasticsearch.common.Nullable;
    @@ -48,14 +49,15 @@ public class NodeAllocationResult implements ToXContent, Writeable, Comparable<NodeAllocationResult> {
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
    @@ ... @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
    -    public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
    -        final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
    -        return balancer.weighShard(shard);
    -    }
    -
         @Override
         public void allocate(RoutingAllocation allocation) {
             if (allocation.routingNodes().size() == 0) {
    @@ -128,16 +124,21 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             balancer.balance();
         }
     
    -    /**
    -     * Returns a decision on rebalancing a single shard to form a more optimal cluster balance.  This
    -     * method is not used in itself for cluster rebalancing because all shards from all indices are
    -     * taken into account when making rebalancing decisions.  This method is only intended to be used
    -     * from the cluster allocation explain API to explain possible rebalancing decisions for a single
    -     * shard.
    -     */
    -    public MoveDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) {
    -        assert allocation.debugDecision() : "debugDecision should be set in explain mode";
    -        return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard);
    +    @Override
    +    public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) {
    +        Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
    +        AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN;
    +        MoveDecision moveDecision = MoveDecision.NOT_TAKEN;
    +        if (shard.unassigned()) {
    +            allocateUnassignedDecision = balancer.decideAllocateUnassigned(shard, Sets.newHashSet());
    +        } else {
    +            moveDecision = balancer.decideMove(shard);
    +            if (moveDecision.isDecisionTaken() && moveDecision.canRemain()) {
    +                MoveDecision rebalanceDecision = balancer.decideRebalance(shard);
    +                moveDecision = rebalanceDecision.withRemainDecision(moveDecision.getCanRemainDecision());
    +            }
    +        }
    +        return new ShardAllocationDecision(allocateUnassignedDecision, moveDecision);
         }
     
         /**
    @@ -337,7 +338,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
              */
             private MoveDecision decideRebalance(final ShardRouting shard) {
                 if (shard.started() == false) {
    -                // cannot rebalance a shard that isn't started
    +                // we can only rebalance started shards
                     return MoveDecision.NOT_TAKEN;
                 }
     
    @@ -437,7 +438,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 }
     
                 if (canRebalance.type() != Type.YES || allocation.hasPendingAsyncFetch()) {
    -                AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.FETCH_PENDING :
    +                AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.AWAITING_INFO :
                                                                 AllocationDecision.fromDecisionType(canRebalance.type());
                     return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions);
                 } else {
    @@ -644,7 +645,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 // offloading the shards.
                 for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) {
                     ShardRouting shardRouting = it.next();
    -                final MoveDecision moveDecision = makeMoveDecision(shardRouting);
    +                final MoveDecision moveDecision = decideMove(shardRouting);
                     if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) {
                         final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
                         final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId());
    @@ -673,7 +674,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
              *   4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then
              *      {@link MoveDecision#nodeDecisions} will have a non-null value.
              */
    -        public MoveDecision makeMoveDecision(final ShardRouting shardRouting) {
    +        public MoveDecision decideMove(final ShardRouting shardRouting) {
                 if (shardRouting.started() == false) {
                     // we can only move started shards
                     return MoveDecision.NOT_TAKEN;
    @@ -1051,7 +1052,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             private int numShards = 0;
             private final RoutingNode routingNode;
     
    -        public ModelNode(RoutingNode routingNode) {
    +        ModelNode(RoutingNode routingNode) {
                 this.routingNode = routingNode;
             }
     
    @@ -1129,7 +1130,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             private final Set shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node
             private int highestPrimary = -1;
     
    -        public ModelIndex(String id) {
    +        ModelIndex(String id) {
                 this.id = id;
             }
     
    @@ -1186,7 +1187,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             private final Balancer balancer;
             private float pivotWeight;
     
    -        public NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) {
    +        NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) {
                 this.function = function;
                 this.balancer = balancer;
                 this.modelNodes = modelNodes;
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
    index 35f3b265418..7e9d15b4528 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
    @@ -19,11 +19,12 @@
     
     package org.elasticsearch.cluster.routing.allocation.allocator;
     
    -import org.elasticsearch.cluster.node.DiscoveryNode;
     import org.elasticsearch.cluster.routing.ShardRouting;
    +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
    +import org.elasticsearch.cluster.routing.allocation.MoveDecision;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
     
    -import java.util.Map;
     /**
      * <p>
      * A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
    @@ -44,13 +45,17 @@ public interface ShardsAllocator {
         void allocate(RoutingAllocation allocation);
     
         /**
    -     * Returns a map of node to a float "weight" of where the allocator would like to place the shard.
    -     * Higher weights signify greater desire to place the shard on that node.
    -     * Does not modify the allocation at all.
    +     * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned,
    +     * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned
    +     * state, then the {@link MoveDecision} will be non-null.
         *
    -     * @param allocation current node allocation
    -     * @param shard shard to weigh
    -     * @return map of nodes to float weights
    +     * This method is primarily used by the cluster allocation explain API to provide detailed explanations
    +     * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method
    +     * may use the results of this method implementation to decide on allocating shards in the routing table
    +     * to the cluster.
    +     *
    +     * If an implementation of this interface does not support explaining decisions for a single shard through
    +     * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}.
         */
    -    Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard);
    +    ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation);
     }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
    index 90a591b1199..4ffd70aee1c 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
    @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
     import org.elasticsearch.cluster.routing.allocation.decider.Decision;
     import org.elasticsearch.common.Nullable;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.xcontent.ObjectParser;
    @@ -50,8 +49,8 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
         private static final String SHARD_FIELD = "shard";
         private static final String NODE_FIELD = "node";
     
    -    protected static <T extends Builder<?>> ObjectParser<T, ParseFieldMatcherSupplier> createAllocateParser(String command) {
    -        ObjectParser<T, ParseFieldMatcherSupplier> parser = new ObjectParser<>(command);
    +    protected static <T extends Builder<?>> ObjectParser<T, Void> createAllocateParser(String command) {
    +        ObjectParser<T, Void> parser = new ObjectParser<>(command);
             parser.declareString(Builder::setIndex, new ParseField(INDEX_FIELD));
             parser.declareInt(Builder::setShard, new ParseField(SHARD_FIELD));
             parser.declareString(Builder::setNode, new ParseField(NODE_FIELD));
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
    index 82d6f436d2a..157acc0e537 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
    @@ -30,8 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
     import org.elasticsearch.cluster.routing.allocation.decider.Decision;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcher;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.xcontent.ObjectParser;
     import org.elasticsearch.common.xcontent.XContentParser;
    @@ -49,8 +47,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
         public static final String NAME = "allocate_empty_primary";
         public static final ParseField COMMAND_NAME_FIELD = new ParseField(NAME);
     
    -    private static final ObjectParser<Builder, ParseFieldMatcherSupplier> EMPTY_PRIMARY_PARSER = BasePrimaryAllocationCommand
    -        .createAllocatePrimaryParser(NAME);
    +    private static final ObjectParser<Builder, Void> EMPTY_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);
     
         /**
          * Creates a new {@link AllocateEmptyPrimaryAllocationCommand}
    @@ -83,7 +80,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
     
         @Override
         public Builder parse(XContentParser parser) throws IOException {
    -        return EMPTY_PRIMARY_PARSER.parse(parser, this, () -> ParseFieldMatcher.STRICT);
    +        return EMPTY_PRIMARY_PARSER.parse(parser, this, null);
         }
     
         @Override
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
    index 8c47deee66f..6ec09a9bbbb 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
    @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
     import org.elasticsearch.cluster.routing.allocation.decider.Decision;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcher;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.xcontent.ObjectParser;
     import org.elasticsearch.common.xcontent.XContentParser;
    @@ -46,8 +44,7 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
         public static final String NAME = "allocate_replica";
         public static final ParseField COMMAND_NAME_FIELD = new ParseField(NAME);
     
    -    private static final ObjectParser<Builder, ParseFieldMatcherSupplier> REPLICA_PARSER =
    -        createAllocateParser(NAME);
    +    private static final ObjectParser<Builder, Void> REPLICA_PARSER = createAllocateParser(NAME);
     
         /**
          * Creates a new {@link AllocateReplicaAllocationCommand}
    @@ -80,7 +77,7 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
     
         @Override
         public Builder parse(XContentParser parser) throws IOException {
    -        return REPLICA_PARSER.parse(parser, this, () -> ParseFieldMatcher.STRICT);
    +        return REPLICA_PARSER.parse(parser, this, null);
         }
     
         @Override
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
    index acdd5cae30b..c643fb5c948 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
    @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
     import org.elasticsearch.cluster.routing.allocation.decider.Decision;
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcher;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.xcontent.ObjectParser;
     import org.elasticsearch.common.xcontent.XContentParser;
    @@ -46,8 +44,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
         public static final String NAME = "allocate_stale_primary";
         public static final ParseField COMMAND_NAME_FIELD = new ParseField(NAME);
     
    -    private static final ObjectParser<Builder, ParseFieldMatcherSupplier> STALE_PRIMARY_PARSER = BasePrimaryAllocationCommand
    -        .createAllocatePrimaryParser(NAME);
    +    private static final ObjectParser<Builder, Void> STALE_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);
     
         /**
          * Creates a new {@link AllocateStalePrimaryAllocationCommand}
    @@ -81,7 +78,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
     
         @Override
         public Builder parse(XContentParser parser) throws IOException {
    -        return STALE_PRIMARY_PARSER.parse(parser, this, () -> ParseFieldMatcher.STRICT);
    +        return STALE_PRIMARY_PARSER.parse(parser, this, null);
         }
     
         @Override
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
    index 10ba3f55944..5098b027f61 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
    @@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchParseException;
     import org.elasticsearch.action.support.ToXContentToBytes;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
     import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
    -import org.elasticsearch.common.ParseFieldMatcher;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.xcontent.XContentBuilder;
    @@ -126,12 +125,10 @@ public class AllocationCommands extends ToXContentToBytes {
          *     }
          * </pre>
          * @param parser {@link XContentParser} to read the commands from
    -     * @param registry of allocation command parsers
          * @return {@link AllocationCommands} read
          * @throws IOException if something bad happens while reading the stream
          */
    -    public static AllocationCommands fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher,
    -                                                  AllocationCommandRegistry registry) throws IOException {
    +    public static AllocationCommands fromXContent(XContentParser parser) throws IOException {
         AllocationCommands commands = new AllocationCommands();
     
         XContentParser.Token token = parser.currentToken();
    @@ -160,7 +157,7 @@ public class AllocationCommands extends ToXContentToBytes {
             token = parser.nextToken();
             String commandName = parser.currentName();
             token = parser.nextToken();
    -        commands.add(registry.lookup(commandName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(parser));
    +        commands.add(parser.namedObject(AllocationCommand.class, commandName, null));
             // move to the end object one
             if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                 throw new ElasticsearchParseException("allocation command is malformed, done parsing a command, but didn't get END_OBJECT, got [{}] instead", token);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
    index f4dc4fba4b8..2cb04260125 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
    @@ -20,7 +20,6 @@
     package org.elasticsearch.cluster.routing.allocation.command;
     
     import org.elasticsearch.common.ParseField;
    -import org.elasticsearch.common.ParseFieldMatcherSupplier;
     import org.elasticsearch.common.io.stream.StreamInput;
     import org.elasticsearch.common.io.stream.StreamOutput;
     import org.elasticsearch.common.xcontent.ObjectParser;
    @@ -35,8 +34,8 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAlloc
     
         private static final String ACCEPT_DATA_LOSS_FIELD = "accept_data_loss";
     
    -    protected static <T extends Builder<?>> ObjectParser<T, ParseFieldMatcherSupplier> createAllocatePrimaryParser(String command) {
    -        ObjectParser<T, ParseFieldMatcherSupplier> parser = AbstractAllocateAllocationCommand.createAllocateParser(command);
    +    protected static <T extends Builder<?>> ObjectParser<T, Void> createAllocatePrimaryParser(String command) {
    +        ObjectParser<T, Void> parser = AbstractAllocateAllocationCommand.createAllocateParser(command);
             parser.declareBoolean(Builder::setAcceptDataLoss, new ParseField(ACCEPT_DATA_LOSS_FIELD));
             return parser;
         }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
    index 986613e5a42..53e67ba25a4 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
    @@ -23,13 +23,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
     import org.elasticsearch.cluster.routing.RoutingNode;
     import org.elasticsearch.cluster.routing.ShardRouting;
     import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    -import org.elasticsearch.common.inject.Inject;
     import org.elasticsearch.common.settings.Settings;
     
     import java.util.Collection;
     import java.util.Collections;
    -import java.util.List;
    -import java.util.Set;
    +
    +import static org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS;
     
     /**
      * A composite {@link AllocationDecider} combining the "decision" of multiple
    @@ -56,7 +55,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -82,7 +82,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     // the assumption is that a decider that returns the static instance Decision#ALWAYS
                     // does not really implements canAllocate
                     ret.add(decision);
    @@ -112,7 +113,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -131,7 +133,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -150,7 +153,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -169,7 +173,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -188,7 +193,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    @@ -216,7 +222,8 @@ public class AllocationDeciders extends AllocationDecider {
                     } else {
                         ret.add(decision);
                     }
    -            } else if (decision != Decision.ALWAYS) {
    +            } else if (decision != Decision.ALWAYS
    +                && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) {
                     ret.add(decision);
                 }
             }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
    index 93c45e7832f..4160fd224aa 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
    @@ -78,12 +78,12 @@ public class AwarenessAllocationDecider extends AllocationDecider {
     
         public static final String NAME = "awareness";
     
         public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING =
    -        new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic,
    +        new Setting<>("cluster.routing.allocation.awareness.attributes", "", s -> Strings.tokenizeToStringArray(s, ","), Property.Dynamic,
             Property.NodeScope);
         public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING =
             Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope);
     
    -    private String[] awarenessAttributes;
    +    private volatile String[] awarenessAttributes;
     
         private volatile Map<String, String[]> forcedAwarenessAttributes;
    @@ -125,7 +125,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
         private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
             if (awarenessAttributes.length == 0) {
                 return allocation.decision(Decision.YES, NAME,
    -                "allocation awareness is not enabled, set [%s] to enable it",
    +                "allocation awareness is not enabled, set cluster setting [%s] to enable it",
                     CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey());
             }
     
    @@ -135,7 +135,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
             // the node the shard exists on must be associated with an awareness attribute
             if (!node.node().getAttributes().containsKey(awarenessAttribute)) {
                 return allocation.decision(Decision.NO, NAME,
    -                "node does not contain the awareness attribute [%s]; required attributes [%s=%s]",
    +                "node does not contain the awareness attribute [%s]; required attributes cluster setting [%s=%s]",
                     awarenessAttribute, CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(),
                     allocation.debugDecision() ? Strings.arrayToCommaDelimitedString(awarenessAttributes) : null);
             }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    index 4e4fb58799b..281f6a603c3 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    @@ -120,13 +120,13 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             // check if there are unassigned primaries.
             if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
                 return allocation.decision(Decision.NO, NAME,
    -                "the cluster has unassigned primary shards and [%s] is set to [%s]",
    +                "the cluster has unassigned primary shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
             // check if there are initializing primaries that don't have a relocatingNodeId entry.
             if ( allocation.routingNodes().hasInactivePrimaries() ) {
                 return allocation.decision(Decision.NO, NAME,
    -                "the cluster has inactive primary shards and [%s] is set to [%s]",
    +                "the cluster has inactive primary shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
     
    @@ -136,14 +136,14 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             // check if there are unassigned shards.
             if (allocation.routingNodes().hasUnassignedShards() ) {
                 return allocation.decision(Decision.NO, NAME,
    -                "the cluster has unassigned shards and [%s] is set to [%s]",
    +                "the cluster has unassigned shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
             // in case all indices are assigned, are there initializing shards which
             // are not relocating?
             if ( allocation.routingNodes().hasInactiveShards() ) {
                 return allocation.decision(Decision.NO, NAME,
    -                "the cluster has inactive shards and [%s] is set to [%s]",
    +                "the cluster has inactive shards and cluster setting [%s] is set to [%s]",
                     CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
             }
         }
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
    index 6ec123ddab3..63fbad59b92 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
    @@ -67,7 +67,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
             int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
             if (relocatingShards >= clusterConcurrentRebalance) {
                 return allocation.decision(Decision.THROTTLE, NAME,
    -                "reached the limit of concurrently rebalancing shards [%d], [%s=%d]",
    +                "reached the limit of concurrently rebalancing shards [%d], cluster setting [%s=%d]",
                     relocatingShards,
                     CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(),
                     clusterConcurrentRebalance);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
    index 34f612f6ac9..a2198ad90d9 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
    @@ -140,6 +140,12 @@ public abstract class Decision implements ToXContent, Writeable {
         @Nullable
         public abstract String label();
     
    +    /**
    +     * Get the explanation for this decision.
    +     */
    +    @Nullable
    +    public abstract String getExplanation();
    +
         /**
          * Return the list of all decisions that make up this decision
          */
    @@ -200,6 +206,7 @@ public abstract class Decision implements ToXContent, Writeable {
         /**
          * Returns the explanation string, fully formatted. Only formats the string once.
          */
    +    @Override
         @Nullable
         public String getExplanation() {
             if (explanationString == null && explanation != null) {
    @@ -301,6 +308,12 @@ public abstract class Decision implements ToXContent, Writeable {
             return null;
         }
     
    +    @Override
    +    @Nullable
    +    public String getExplanation() {
    +        throw new UnsupportedOperationException("multi-level decisions do not have an explanation");
    +    }
    +
         @Override
         public List<Decision> getDecisions() {
             return Collections.unmodifiableList(this.decisions);
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
    index 5eb1ae1751e..56663be1ef4 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
    @@ -138,7 +138,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the node is above the low watermark [%s=%s], having less than the minimum required [%s] free space, actual free: [%s]",
    +            "the node is above the low watermark cluster setting [%s=%s], having less than the minimum required [%s] free " +
    +            "space, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getLowWatermarkRaw(),
                 diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
    @@ -162,8 +163,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the node is above the high watermark [%s=%s], having less than the minimum required [%s] free space, " +
    -            "actual free: [%s]",
    +            "the node is above the high watermark cluster setting [%s=%s], having less than the minimum required [%s] free " +
    +            "space, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
                 diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
    @@ -180,8 +181,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the node is above the low watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
    -            "actual free: [%s%%]",
    +            "the node is above the low watermark cluster setting [%s=%s], using more disk space than the maximum allowed " +
    +            "[%s%%], actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getLowWatermarkRaw(),
                 usedDiskThresholdLow, freeDiskPercentage);
         } else if (freeDiskPercentage > diskThresholdSettings.getFreeDiskThresholdHigh()) {
    @@ -206,8 +207,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                     Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the node is above the high watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
    -            "actual free: [%s%%]",
    +            "the node is above the high watermark cluster setting [%s=%s], using more disk space than the maximum allowed " +
    +            "[%s%%], actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeDiskPercentage);
         }
    @@ -222,7 +223,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                 "{} free bytes threshold ({} bytes free), preventing allocation",
                 node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
             return allocation.decision(Decision.NO, NAME,
    -            "allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
    +            "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " +
                 "and cause it to have less than the minimum required [%s] of free space (free bytes after shard added: [%s])",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
    @@ -234,7 +235,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                 node.nodeId(), Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"),
                 Strings.format1Decimals(freeSpaceAfterShard, "%"));
             return allocation.decision(Decision.NO, NAME,
    -            "allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
    +            "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " +
                 "and cause it to use more disk space than the maximum allowed [%s%%] (free space after shard added: [%s%%])",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeSpaceAfterShard);
    @@ -279,7 +280,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
    +            "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " +
                 "and there is less than the required [%s] free space on node, actual free: [%s]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
    @@ -291,7 +292,7 @@ public class DiskThresholdDecider extends AllocationDecider {
                     diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
             }
             return allocation.decision(Decision.NO, NAME,
    -            "the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
    +            "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " +
                 "and there is less than the required [%s%%] free disk on node, actual free: [%s%%]",
                 CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
                 diskThresholdSettings.getHighWatermarkRaw(),
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
    index b6995c6b972..7bb073a4c45 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
    @@ -178,10 +178,12 @@ public class EnableAllocationDecider extends AllocationDecider {
         }
     
         private static String setting(Allocation allocation, boolean usedIndexSetting) {
    -        StringBuilder buf = new StringBuilder("[");
    +        StringBuilder buf = new StringBuilder();
             if (usedIndexSetting) {
    +            buf.append("index setting [");
                 buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
             } else {
    +            buf.append("cluster setting [");
                 buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
             }
             buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]");
    @@ -189,10 +191,12 @@ public class EnableAllocationDecider extends AllocationDecider {
         }
     
         private static String setting(Rebalance rebalance, boolean usedIndexSetting) {
    -        StringBuilder buf = new StringBuilder("[");
    +        StringBuilder buf = new StringBuilder();
             if (usedIndexSetting) {
    +            buf.append("index setting [");
                 buf.append(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
             } else {
    +            buf.append("cluster setting [");
                 buf.append(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
             }
             buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]");
    diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
    index 21b6b3d1354..85069392eb6 100644
    --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
    +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
    @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Setting;
     import org.elasticsearch.common.settings.Setting.Property;
     import org.elasticsearch.common.settings.Settings;
     
    +import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;
     import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
     import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
     
    @@ -68,11 +69,11 @@ public class FilterAllocationDecider extends AllocationDecider {
         private static final String CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX = "cluster.routing.allocation.include";
         private static final String CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX = "cluster.routing.allocation.exclude";
         public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
    -        Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);
    +        Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
         public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
    -        Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);
    +        Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
         public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
    -        Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);
    +        Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
     
         private volatile DiscoveryNodeFilters clusterRequireFilters;
         private volatile DiscoveryNodeFilters clusterIncludeFilters;
    @@ -141,19 +142,19 @@ public class FilterAllocationDecider extends AllocationDecider {
         private Decision shouldIndexFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
             if (indexMd.requireFilters() != null) {
                 if (!indexMd.requireFilters().match(node.node())) {
    -                return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
    +                return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]",
                         IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX,
indexMd.requireFilters()); } } if (indexMd.includeFilters() != null) { if (!indexMd.includeFilters().match(node.node())) { - return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]", + return allocation.decision(Decision.NO, NAME, "node does not match index setting [%s] filters [%s]", IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, indexMd.includeFilters()); } } if (indexMd.excludeFilters() != null) { if (indexMd.excludeFilters().match(node.node())) { - return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]", + return allocation.decision(Decision.NO, NAME, "node matches index setting [%s] filters [%s]", IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey(), indexMd.excludeFilters()); } } @@ -163,19 +164,19 @@ public class FilterAllocationDecider extends AllocationDecider { private Decision shouldClusterFilter(RoutingNode node, RoutingAllocation allocation) { if (clusterRequireFilters != null) { if (!clusterRequireFilters.match(node.node())) { - return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]", + return allocation.decision(Decision.NO, NAME, "node does not match cluster setting [%s] filters [%s]", CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX, clusterRequireFilters); } } if (clusterIncludeFilters != null) { if (!clusterIncludeFilters.match(node.node())) { - return allocation.decision(Decision.NO, NAME, "node does not [%s] filters [%s]", + return allocation.decision(Decision.NO, NAME, "node does not match cluster setting [%s] filters [%s]", CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX, clusterIncludeFilters); } } if (clusterExcludeFilters != null) { if (clusterExcludeFilters.match(node.node())) { - return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]", + return allocation.decision(Decision.NO, NAME, "node matches cluster setting [%s] filters [%s]", CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX, clusterExcludeFilters); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 30fed539b79..387360f1a04 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -88,7 +88,8 @@ public class SameShardAllocationDecider extends AllocationDecider { String host = checkNodeOnSameHostAddress ?
node.node().getHostAddress() : node.node().getHostName(); return allocation.decision(Decision.NO, NAME, "the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; " + - "set [%s] to false to allow multiple nodes on the same host to hold the same shard copies", + "set cluster setting [%s] to false to allow multiple nodes on the same host to hold the same " + + "shard copies", hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey()); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index e6f0fbcd645..2118d37fe47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -122,12 +122,12 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { if (clusterShardLimit > 0 && decider.test(nodeShardCount, clusterShardLimit)) { return allocation.decision(Decision.NO, NAME, - "too many shards [%d] allocated to this node, [%s=%d]", + "too many shards [%d] allocated to this node, cluster setting [%s=%d]", nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit); } if (indexShardLimit > 0 && decider.test(indexShardCount, indexShardLimit)) { return allocation.decision(Decision.NO, NAME, - "too many shards [%d] allocated to this node for index [%s], [%s=%d]", + "too many shards [%d] allocated to this node for index [%s], index setting [%s=%d]", indexShardCount, shardRouting.getIndexName(), INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), indexShardLimit); } return allocation.decision(Decision.YES, NAME, @@ -157,7 +157,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { } if (clusterShardLimit >= 0 && nodeShardCount >= clusterShardLimit) { return allocation.decision(Decision.NO, NAME, - "too many shards [%d] allocated to this node, [%s=%d]", + "too many shards [%d] allocated to this node, cluster setting [%s=%d]", nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit); } return allocation.decision(Decision.YES, NAME, diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index a59f543ac3f..721de71435d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -126,7 +126,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider { } if (primariesInRecovery >= primariesInitialRecoveries) { // TODO: Should index creation not be throttled for primary shards? 
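To make these rewordings concrete, here is how the initial-primary-recoveries THROTTLE explanation (changed immediately below) renders once its format string is filled in; the setting key is the real key behind CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, the counts are invented:

-------------------------------------------------
import java.util.Locale;

class ThrottleMessageDemo {
    public static void main(String[] args) {
        String explanation = String.format(Locale.ROOT,
            "reached the limit of ongoing initial primary recoveries [%d], cluster setting [%s=%d]",
            4, "cluster.routing.allocation.node_initial_primaries_recoveries", 4);
        // prints: reached the limit of ongoing initial primary recoveries [4], cluster setting
        // [cluster.routing.allocation.node_initial_primaries_recoveries=4]
        System.out.println(explanation);
    }
}
-------------------------------------------------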
- return allocation.decision(THROTTLE, NAME, "reached the limit of ongoing initial primary recoveries [%d], [%s=%d]", + return allocation.decision(THROTTLE, NAME, + "reached the limit of ongoing initial primary recoveries [%d], cluster setting [%s=%d]", primariesInRecovery, CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), primariesInitialRecoveries); } else { @@ -140,7 +141,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider { int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId()); if (currentInRecoveries >= concurrentIncomingRecoveries) { return allocation.decision(THROTTLE, NAME, - "reached the limit of incoming shard recoveries [%d], [%s=%d] (can also be set via [%s])", + "reached the limit of incoming shard recoveries [%d], cluster setting [%s=%d] (can also be set via [%s])", currentInRecoveries, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), concurrentIncomingRecoveries, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey()); @@ -154,7 +155,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider { if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) { return allocation.decision(THROTTLE, NAME, "reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " + - "[%s=%d] (can also be set via [%s])", + "cluster setting [%s=%d] (can also be set via [%s])", primaryNodeOutRecoveries, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), concurrentOutgoingRecoveries, diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index d213cea4d33..e2040312e09 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -31,9 +31,8 @@ import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskExecutor.BatchResult; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; @@ -64,6 +63,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -102,6 +102,7 @@ public class ClusterService extends AbstractLifecycleComponent { public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; private final ClusterName clusterName; + private final Supplier localNodeSupplier; private BiConsumer clusterStatePublisher; @@ -137,9 +138,12 @@ public class ClusterService extends AbstractLifecycleComponent { private NodeConnectionsService nodeConnectionsService; + private 
DiscoverySettings discoverySettings; + public ClusterService(Settings settings, - ClusterSettings clusterSettings, ThreadPool threadPool) { + ClusterSettings clusterSettings, ThreadPool threadPool, Supplier<DiscoveryNode> localNodeSupplier) { super(settings); + this.localNodeSupplier = localNodeSupplier; this.operationRouting = new OperationRouting(settings, clusterSettings); this.threadPool = threadPool; this.clusterSettings = clusterSettings; @@ -165,14 +169,6 @@ public class ClusterService extends AbstractLifecycleComponent { clusterStatePublisher = publisher; } - public synchronized void setLocalNode(DiscoveryNode localNode) { - assert state().nodes().getLocalNodeId() == null : "local node is already set"; - updateState(clusterState -> { - DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build(); - return ClusterState.builder(clusterState).nodes(nodes).build(); - }); - } - private void updateState(UnaryOperator<ClusterState> updateFunction) { this.state.getAndUpdate(updateFunction); } @@ -212,10 +208,16 @@ public class ClusterService extends AbstractLifecycleComponent { @Override protected synchronized void doStart() { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); - Objects.requireNonNull(state().nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); + Objects.requireNonNull(discoverySettings, "please set discovery settings before starting"); addListener(localNodeMasterListeners); - updateState(state -> ClusterState.builder(state).blocks(initialBlocks).build()); + DiscoveryNode localNode = localNodeSupplier.get(); + assert localNode != null; + updateState(state -> { + assert state.nodes().getLocalNodeId() == null : "local node is already set"; + DiscoveryNodes nodes = DiscoveryNodes.builder(state.nodes()).add(localNode).localNodeId(localNode.getId()).build(); + return ClusterState.builder(state).nodes(nodes).blocks(initialBlocks).build(); + }); this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext()); } @@ -379,11 +381,11 @@ public class ClusterService extends AbstractLifecycleComponent { * task * */ - public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) { + public <T extends ClusterStateTaskConfig & ClusterStateTaskExecutor<T> & ClusterStateTaskListener> void submitStateUpdateTask( + final String source, final T updateTask) { submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask); } - /** * Submits a cluster state update task; submitted updates will be * batched across the same instance of executor.
The exact batching @@ -573,10 +575,14 @@ public class ClusterService extends AbstractLifecycleComponent { return clusterName; } + public void setDiscoverySettings(DiscoverySettings discoverySettings) { + this.discoverySettings = discoverySettings; + } + abstract static class SourcePrioritizedRunnable extends PrioritizedRunnable { protected final String source; - public SourcePrioritizedRunnable(Priority priority, String source) { + SourcePrioritizedRunnable(Priority priority, String source) { super(priority); this.source = source; } @@ -643,29 +649,28 @@ public class ClusterService extends AbstractLifecycleComponent { } public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, long startTimeNS) { - BatchResult batchResult = executeTasks(taskInputs, startTimeNS, previousClusterState); - ClusterState newClusterState = batchResult.resultingState; + ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, startTimeNS, previousClusterState); // extract those that are waiting for results List nonFailedTasks = new ArrayList<>(); for (UpdateTask updateTask : taskInputs.updateTasks) { - assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; + assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; final ClusterStateTaskExecutor.TaskResult taskResult = - batchResult.executionResults.get(updateTask.task); + clusterTasksResult.executionResults.get(updateTask.task); if (taskResult.isSuccess()) { nonFailedTasks.add(updateTask); } } - newClusterState = patchVersions(previousClusterState, newClusterState); + ClusterState newClusterState = patchVersionsAndNoMasterBlocks(previousClusterState, clusterTasksResult); return new TaskOutputs(taskInputs, previousClusterState, newClusterState, nonFailedTasks, - batchResult.executionResults); + clusterTasksResult.executionResults); } - private BatchResult executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) { - BatchResult batchResult; + private ClusterTasksResult executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) { + ClusterTasksResult clusterTasksResult; try { List inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); - batchResult = taskInputs.executor.execute(previousClusterState, inputs); + clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { @@ -681,42 +686,70 @@ public class ClusterService extends AbstractLifecycleComponent { e); } warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary); - batchResult = BatchResult.builder() + clusterTasksResult = ClusterTasksResult.builder() .failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e) .build(previousClusterState); } - assert batchResult.executionResults != null; - assert batchResult.executionResults.size() == taskInputs.updateTasks.size() + assert clusterTasksResult.executionResults != null; + assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size() : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", taskInputs.updateTasks.size(), - taskInputs.updateTasks.size() == 1 ? "" : "s", batchResult.executionResults.size()); + taskInputs.updateTasks.size() == 1 ? 
"" : "s", clusterTasksResult.executionResults.size()); boolean assertsEnabled = false; assert (assertsEnabled = true); if (assertsEnabled) { for (UpdateTask updateTask : taskInputs.updateTasks) { - assert batchResult.executionResults.containsKey(updateTask.task) : + assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; } } - return batchResult; + return clusterTasksResult; } - private ClusterState patchVersions(ClusterState previousClusterState, ClusterState newClusterState) { - if (previousClusterState != newClusterState) { - if (newClusterState.nodes().isLocalNodeElectedMaster()) { - // only the master controls the version numbers - Builder builder = ClusterState.builder(newClusterState).incrementVersion(); - if (previousClusterState.routingTable() != newClusterState.routingTable()) { - builder.routingTable(RoutingTable.builder(newClusterState.routingTable()) - .version(newClusterState.routingTable().version() + 1).build()); - } - if (previousClusterState.metaData() != newClusterState.metaData()) { - builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); - } - newClusterState = builder.build(); + private ClusterState patchVersionsAndNoMasterBlocks(ClusterState previousClusterState, ClusterTasksResult executionResult) { + ClusterState newClusterState = executionResult.resultingState; + + if (executionResult.noMaster) { + assert newClusterState == previousClusterState : "state can only be changed by ClusterService when noMaster = true"; + if (previousClusterState.nodes().getMasterNodeId() != null) { + // remove block if it already exists before adding new one + assert previousClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id()) == false : + "NO_MASTER_BLOCK should only be added by ClusterService"; + ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(previousClusterState.blocks()) + .addGlobalBlock(discoverySettings.getNoMasterBlock()) + .build(); + + DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(previousClusterState.nodes()).masterNodeId(null).build(); + newClusterState = ClusterState.builder(previousClusterState) + .blocks(clusterBlocks) + .nodes(discoveryNodes) + .build(); } + } else if (newClusterState.nodes().isLocalNodeElectedMaster() && previousClusterState != newClusterState) { + // only the master controls the version numbers + Builder builder = ClusterState.builder(newClusterState).incrementVersion(); + if (previousClusterState.routingTable() != newClusterState.routingTable()) { + builder.routingTable(RoutingTable.builder(newClusterState.routingTable()) + .version(newClusterState.routingTable().version() + 1).build()); + } + if (previousClusterState.metaData() != newClusterState.metaData()) { + builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); + } + + // remove the no master block, if it exists + if (newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id())) { + builder.blocks(ClusterBlocks.builder().blocks(newClusterState.blocks()) + .removeGlobalBlock(discoverySettings.getNoMasterBlock().id())); + } + + newClusterState = builder.build(); } + + assert newClusterState.nodes().getMasterNodeId() == null || + newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id()) == false : + "cluster state with master node must not have NO_MASTER_BLOCK"; + return newClusterState; } @@ -738,7 +771,7 @@ public class 
ClusterService extends AbstractLifecycleComponent { taskOutputs.createAckListener(threadPool, newClusterState) : null; - nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes()); + nodeConnectionsService.connectToNodes(newClusterState.nodes()); // if we are the master, publish the new state to all nodes // we publish here before we send a notification to all the listeners, since if it fails @@ -754,7 +787,8 @@ public class ClusterService extends AbstractLifecycleComponent { "failing [{}]: failed to commit cluster state version [{}]", taskInputs.summary, version), t); // ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state - nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes()); + nodeConnectionsService.connectToNodes(previousClusterState.nodes()); + nodeConnectionsService.disconnectFromNodesExcept(previousClusterState.nodes()); taskOutputs.publishingFailed(t); return; } @@ -774,7 +808,7 @@ public class ClusterService extends AbstractLifecycleComponent { logger.debug("set local cluster state to version {}", newClusterState.version()); callClusterStateAppliers(newClusterState, clusterChangedEvent); - nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); + nodeConnectionsService.disconnectFromNodesExcept(newClusterState.nodes()); updateState(css -> newClusterState); @@ -801,14 +835,16 @@ public class ClusterService extends AbstractLifecycleComponent { taskOutputs.processedDifferentClusterState(previousClusterState, newClusterState); - try { - taskOutputs.clusterStatePublished(clusterChangedEvent); - } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "exception thrown while notifying executor of new cluster state publication [{}]", - taskInputs.summary), - e); + if (newClusterState.nodes().isLocalNodeElectedMaster()) { + try { + taskOutputs.clusterStatePublished(clusterChangedEvent); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception thrown while notifying executor of new cluster state publication [{}]", + taskInputs.summary), + e); + } } } @@ -856,7 +892,7 @@ public class ClusterService extends AbstractLifecycleComponent { public final List nonFailedTasks; public final Map executionResults; - public TaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, + TaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, ClusterState newClusterState, List nonFailedTasks, Map executionResults) { this.taskInputs = taskInputs; @@ -946,7 +982,7 @@ public class ClusterService extends AbstractLifecycleComponent { private final ClusterStateTaskListener listener; private final Logger logger; - public SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) { + SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) { this.listener = listener; this.logger = logger; } @@ -993,7 +1029,7 @@ public class ClusterService extends AbstractLifecycleComponent { private final AckedClusterStateTaskListener listener; private final Logger logger; - public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) { + SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) { super(listener, logger); this.listener = listener; this.logger = logger; diff --git a/core/src/main/java/org/elasticsearch/common/Booleans.java 
b/core/src/main/java/org/elasticsearch/common/Booleans.java index 9ec1ac968ac..025174c477d 100644 --- a/core/src/main/java/org/elasticsearch/common/Booleans.java +++ b/core/src/main/java/org/elasticsearch/common/Booleans.java @@ -19,14 +19,128 @@ package org.elasticsearch.common; -public class Booleans { +public final class Booleans { + private Booleans() { + throw new AssertionError("No instances intended"); + } + + /** + * Parses a char[] representation of a boolean value to boolean. + * + * @return true iff the sequence of chars is "true", false iff the sequence of chars is "false" or the + * provided default value iff either text is null or length == 0. + * @throws IllegalArgumentException if the string cannot be parsed to boolean. + */ + public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) { + if (text == null || length == 0) { + return defaultValue; + } else { + return parseBoolean(new String(text, offset, length)); + } + } + + /** + * returns true iff the sequence of chars is one of "true","false". + * + * @param text sequence to check + * @param offset offset to start + * @param length length to check + */ + public static boolean isBoolean(char[] text, int offset, int length) { + if (text == null || length == 0) { + return false; + } + return isBoolean(new String(text, offset, length)); + } + + public static boolean isBoolean(String value) { + return isFalse(value) || isTrue(value); + } + + /** + * Parses a string representation of a boolean value to boolean. + * + * @return true iff the provided value is "true". false iff the provided value is "false". + * @throws IllegalArgumentException if the string cannot be parsed to boolean. + */ + public static boolean parseBoolean(String value) { + if (isFalse(value)) { + return false; + } + if (isTrue(value)) { + return true; + } + throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); + } + + /** + * + * @param value text to parse. + * @param defaultValue The default value to return if the provided value is null. + * @return see {@link #parseBoolean(String)} + */ + public static boolean parseBoolean(String value, boolean defaultValue) { + if (Strings.hasText(value)) { + return parseBoolean(value); + } + return defaultValue; + } + + public static Boolean parseBoolean(String value, Boolean defaultValue) { + if (Strings.hasText(value)) { + return parseBoolean(value); + } + return defaultValue; + } /** * Returns false if text is in false, 0, off, no; else, true + * + * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead. */ - public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) { - // TODO: the leniency here is very dangerous: a simple typo will be misinterpreted and the user won't know. - // We should remove it and cutover to https://github.com/rmuir/booleanparser + @Deprecated + public static Boolean parseBooleanLenient(String value, Boolean defaultValue) { + if (value == null) { // only for the null case we do that here! + return defaultValue; + } + return parseBooleanLenient(value, false); + } + /** + * Returns true iff the value is neither of the following: + * false, 0, off, no + * otherwise false + * + * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead. 
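The strict/lenient split introduced in this file is easiest to see side by side. A short usage sketch; the expected values follow directly from the implementations in this diff:

-------------------------------------------------
import org.elasticsearch.common.Booleans;

class BooleansDemo {
    public static void main(String[] args) {
        // strict parser: only the literal strings "true" and "false" are accepted
        System.out.println(Booleans.parseBoolean("true"));     // true
        System.out.println(Booleans.parseBoolean(null, true)); // true (falls back to the default)
        try {
            Booleans.parseBoolean("yes");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // ... only [true] or [false] are allowed.
        }

        // deprecated lenient parser: anything outside {"false", "0", "off", "no"} is true,
        // so a typo like "flase" silently parses as true -- the behavior being phased out
        System.out.println(Booleans.parseBooleanLenient("off", true));    // false
        System.out.println(Booleans.parseBooleanLenient("flase", false)); // true
    }
}
-------------------------------------------------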
+ */ + @Deprecated + public static boolean parseBooleanLenient(String value, boolean defaultValue) { + if (value == null) { + return defaultValue; + } + return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); + } + + /** + * @return true iff the value is false, otherwise false. + */ + public static boolean isFalse(String value) { + return "false".equals(value); + } + + /** + * @return true iff the value is true, otherwise false + */ + public static boolean isTrue(String value) { + return "true".equals(value); + } + + /** + * Returns false if text is in false, 0, off, no; else, true + * + * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead + */ + @Deprecated + public static boolean parseBooleanLenient(char[] text, int offset, int length, boolean defaultValue) { if (text == null || length == 0) { return defaultValue; } @@ -40,7 +154,8 @@ public class Booleans { return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f'); } if (length == 5) { - return !(text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e'); + return !(text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && + text[offset + 4] == 'e'); } return true; } @@ -51,8 +166,11 @@ public class Booleans { * @param text sequence to check * @param offset offset to start * @param length length to check + * + * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #isBoolean(char[], int, int)} instead. */ - public static boolean isBoolean(char[] text, int offset, int length) { + @Deprecated + public static boolean isBooleanLenient(char[] text, int offset, int length) { if (text == null || length == 0) { return false; } @@ -64,69 +182,16 @@ public class Booleans { } if (length == 3) { return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') || - (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's'); + (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's'); } if (length == 4) { return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'); } if (length == 5) { - return (text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e'); + return (text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && + text[offset + 4] == 'e'); } return false; } - /*** - * - * @return true/false - * throws exception if string cannot be parsed to boolean - */ - public static Boolean parseBooleanExact(String value) { - boolean isFalse = isExplicitFalse(value); - if (isFalse) { - return false; - } - boolean isTrue = isExplicitTrue(value); - if (isTrue) { - return true; - } - - throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]"); - } - - public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (value == null) { // only for the null case we do that here! 
- return defaultValue; - } - return parseBoolean(value, false); - } - /** - * Returns true iff the value is neither of the following: - * false, 0, off, no - * otherwise false - */ - public static boolean parseBoolean(String value, boolean defaultValue) { - if (value == null) { - return defaultValue; - } - return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); - } - - /** - * Returns true iff the value is either of the following: - * false, 0, off, no - * otherwise false - */ - public static boolean isExplicitFalse(String value) { - return value != null && (value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); - } - - /** - * Returns true iff the value is either of the following: - * true, 1, on, yes - * otherwise false - */ - public static boolean isExplicitTrue(String value) { - return value != null && (value.equals("true") || value.equals("1") || value.equals("on") || value.equals("yes")); - } - } diff --git a/core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java b/core/src/main/java/org/elasticsearch/common/CheckedFunction.java similarity index 73% rename from core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java rename to core/src/main/java/org/elasticsearch/common/CheckedFunction.java index dd04145ba7d..4a2d222db0b 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchExtRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/CheckedFunction.java @@ -17,16 +17,14 @@ * under the License. */ -package org.elasticsearch.search; +package org.elasticsearch.common; -import org.elasticsearch.common.xcontent.ParseFieldRegistry; +import java.util.function.Function; /** - * Extensions to ParseFieldRegistry to make Guice happy. + * A {@link Function}-like interface which allows throwing checked exceptions. */ -public class SearchExtRegistry extends ParseFieldRegistry { - - public SearchExtRegistry() { - super("ext"); - } +@FunctionalInterface +public interface CheckedFunction { + R apply(T t) throws E; } diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/common/CheckedRunnable.java similarity index 69% rename from core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java rename to core/src/main/java/org/elasticsearch/common/CheckedRunnable.java index f1d45b55495..196eb53a878 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/CheckedRunnable.java @@ -17,16 +17,14 @@ * under the License. */ -package org.elasticsearch.indices.query; +package org.elasticsearch.common; -import org.elasticsearch.common.xcontent.ParseFieldRegistry; -import org.elasticsearch.index.query.QueryParser; +import java.lang.Runnable; /** - * Extensions to ParseFieldRegistry to make Guice happy. + * A {@link Runnable}-like interface which allows throwing checked exceptions. 
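The generic signatures of these new functional interfaces did not survive the plain-text rendering of this diff (the angle brackets were stripped, here and in the CheckedRunnable and CheckedSupplier that follow). Reconstructed, the three declarations should read:

-------------------------------------------------
@FunctionalInterface
public interface CheckedFunction<T, R, E extends Exception> {
    R apply(T t) throws E;
}

@FunctionalInterface
public interface CheckedRunnable<E extends Exception> {
    void run() throws E;
}

@FunctionalInterface
public interface CheckedSupplier<R, E extends Exception> {
    R get() throws E;
}
-------------------------------------------------

A typical use is a parsing lambda that throws IOException, e.g. CheckedFunction<XContentParser, SomeValue, IOException>, which a plain java.util.function.Function cannot express.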
*/ -public class IndicesQueriesRegistry extends ParseFieldRegistry> { - public IndicesQueriesRegistry() { - super("query"); - } +@FunctionalInterface +public interface CheckedRunnable { + void run() throws E; } diff --git a/core/src/main/java/org/elasticsearch/common/CheckedSupplier.java b/core/src/main/java/org/elasticsearch/common/CheckedSupplier.java new file mode 100644 index 00000000000..8f4d2edea0c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/CheckedSupplier.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import java.util.function.Supplier; + +/** + * A {@link Supplier}-like interface which allows throwing checked exceptions. + */ +@FunctionalInterface +public interface CheckedSupplier { + R get() throws E; +} diff --git a/core/src/main/java/org/elasticsearch/common/ParseField.java b/core/src/main/java/org/elasticsearch/common/ParseField.java index 7121be7d1d8..fc9377eeb2f 100644 --- a/core/src/main/java/org/elasticsearch/common/ParseField.java +++ b/core/src/main/java/org/elasticsearch/common/ParseField.java @@ -101,14 +101,10 @@ public class ParseField { /** * @param fieldName * the field name to match against this {@link ParseField} - * @param strict - * if true an exception will be thrown if a deprecated field name - * is given. If false the deprecated name will be matched but a - * message will also be logged to the {@link DeprecationLogger} * @return true if fieldName matches any of the acceptable * names for this {@link ParseField}. */ - boolean match(String fieldName, boolean strict) { + public boolean match(String fieldName) { Objects.requireNonNull(fieldName, "fieldName cannot be null"); // if this parse field has not been completely deprecated then try to // match the preferred name @@ -128,11 +124,7 @@ public class ParseField { // message to indicate what should be used instead msg = "Deprecated field [" + fieldName + "] used, replaced by [" + allReplacedWith + "]"; } - if (strict) { - throw new IllegalArgumentException(msg); - } else { - DEPRECATION_LOGGER.deprecated(msg); - } + DEPRECATION_LOGGER.deprecated(msg); return true; } } diff --git a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java b/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java deleted file mode 100644 index 9866694a230..00000000000 --- a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common; - -import org.elasticsearch.common.settings.Settings; - -/** - * Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField} - * against a field name and throw deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting. - */ -public class ParseFieldMatcher { - public static final String PARSE_STRICT = "index.query.parse.strict"; - public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false); - public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true); - - private final boolean strict; - - public ParseFieldMatcher(Settings settings) { - this(settings.getAsBoolean(PARSE_STRICT, false)); - } - - public ParseFieldMatcher(boolean strict) { - this.strict = strict; - } - - /** Should deprecated settings be rejected? */ - public boolean isStrict() { - return strict; - } - - /** - * Matches a {@link ParseField} against a field name, and throws deprecation exception depending on the current - * value of the {@link #PARSE_STRICT} setting. - * @param fieldName the field name found in the request while parsing - * @param parseField the parse field that we are looking for - * @throws IllegalArgumentException whenever we are in strict mode and the request contained a deprecated field - * @return true whenever the parse field that we are looking for was found, false otherwise - */ - public boolean match(String fieldName, ParseField parseField) { - return parseField.match(fieldName, strict); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/ParsingException.java b/core/src/main/java/org/elasticsearch/common/ParsingException.java index 0519ab38339..5dc2c8a74e4 100644 --- a/core/src/main/java/org/elasticsearch/common/ParsingException.java +++ b/core/src/main/java/org/elasticsearch/common/ParsingException.java @@ -95,12 +95,11 @@ public class ParsingException extends ElasticsearchException { } @Override - protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { if (lineNumber != UNKNOWN_POSITION) { builder.field("line", lineNumber); builder.field("col", columnNumber); } - super.innerToXContent(builder, params); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/Strings.java b/core/src/main/java/org/elasticsearch/common/Strings.java index 1ef13e3bc70..2f55ab46e7e 100644 --- a/core/src/main/java/org/elasticsearch/common/Strings.java +++ b/core/src/main/java/org/elasticsearch/common/Strings.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -276,26 +277,6 @@ public class Strings { return true; } - /** - * Count the occurrences of the substring in string s. - * - * @param str string to search in. Return 0 if this is null. - * @param sub string to search for. Return 0 if this is null. - */ - public static int countOccurrencesOf(String str, String sub) { - if (str == null || sub == null || str.length() == 0 || sub.length() == 0) { - return 0; - } - int count = 0; - int pos = 0; - int idx; - while ((idx = str.indexOf(sub, pos)) != -1) { - ++count; - pos = idx + sub.length(); - } - return count; - } - /** * Replace all occurrences of a substring within a string with * another string. @@ -877,26 +858,17 @@ public class Strings { } /** - * Return a {@link String} that is the json representation of the provided - * {@link ToXContent}. + * Return a {@link String} that is the json representation of the provided {@link ToXContent}. + * Wraps the output into an anonymous object. */ public static String toString(ToXContent toXContent) { - return toString(toXContent, false); - } - - /** - * Return a {@link String} that is the json representation of the provided - * {@link ToXContent}. - * @param wrapInObject set this to true if the ToXContent instance expects to be inside an object - */ - public static String toString(ToXContent toXContent, boolean wrapInObject) { try { XContentBuilder builder = JsonXContent.contentBuilder(); - if (wrapInObject) { + if (toXContent.isFragment()) { builder.startObject(); } toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); - if (wrapInObject) { + if (toXContent.isFragment()) { builder.endObject(); } return builder.string(); diff --git a/core/src/main/java/org/elasticsearch/common/TriFunction.java b/core/src/main/java/org/elasticsearch/common/TriFunction.java new file mode 100644 index 00000000000..85655863a4f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/TriFunction.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +package org.elasticsearch.common; + +/** + * Represents a function that accepts three arguments and produces a result. + * + * @param the type of the first argument + * @param the type of the second argument + * @param the type of the third argument + * @param the return type + */ +@FunctionalInterface +public interface TriFunction { + /** + * Applies this function to the given arguments. 
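TriFunction's type parameters were stripped in the same way (its declaration closes just below). Reconstructed, together with a small invented usage example:

-------------------------------------------------
@FunctionalInterface
public interface TriFunction<S, T, U, R> {
    R apply(S s, T t, U u);
}

class TriFunctionDemo {
    public static void main(String[] args) {
        // illustrative lambda, not from the source
        TriFunction<Integer, Integer, Integer, Integer> sum3 = (a, b, c) -> a + b + c;
        System.out.println(sum3.apply(1, 2, 3)); // 6
    }
}
-------------------------------------------------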
+ * + * @param s the first function argument + * @param t the second function argument + * @param u the third function argument + * @return the result + */ + R apply(S s, T t, U u); +} diff --git a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index 68bf52e9e0d..e4019f9f665 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -90,12 +90,12 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { @Override public void circuitBreak(String fieldName, long bytesNeeded) { this.trippedCount.incrementAndGet(); - final String message = "[" + this.name + "] Data too large, data for [" + - fieldName + "] would be larger than limit of [" + + final String message = "[" + this.name + "] Data too large, data for [" + fieldName + "]" + + " would be [" + bytesNeeded + "/" + new ByteSizeValue(bytesNeeded) + "]" + + ", which is larger than the limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; logger.debug("{}", message); - throw new CircuitBreakingException(message, - bytesNeeded, this.memoryBytesLimit); + throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit); } /** diff --git a/core/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java b/core/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java index e700d301644..e01fe1beee2 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java @@ -73,9 +73,8 @@ public class CircuitBreakingException extends ElasticsearchException { } @Override - protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("bytes_wanted", bytesWanted); builder.field("bytes_limit", byteLimit); - super.innerToXContent(builder, params); } } diff --git a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java index 23e76a9fd35..dbd1fe92ffe 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java @@ -79,7 +79,9 @@ public class MemoryCircuitBreaker implements CircuitBreaker { @Override public void circuitBreak(String fieldName, long bytesNeeded) throws CircuitBreakingException { this.trippedCount.incrementAndGet(); - final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" + + final String message = "[" + getName() + "] Data too large, data for field [" + fieldName + "]" + + " would be [" + bytesNeeded + "/" + new ByteSizeValue(bytesNeeded) + "]" + + ", which is larger than the limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; logger.debug("{}", message); throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit); diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java index 4dcd92bf549..fff6392f238 100644 --- 
a/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java @@ -37,7 +37,7 @@ final class BytesReferenceStreamInput extends StreamInput { private final int length; // the total size of the stream private int offset; // the current position of the stream - public BytesReferenceStreamInput(BytesRefIterator iterator, final int length) throws IOException { + BytesReferenceStreamInput(BytesRefIterator iterator, final int length) throws IOException { this.iterator = iterator; this.slice = iterator.next(); this.length = length; diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java index fd83f5f1494..2297df67655 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -166,7 +166,7 @@ public class Cache { Entry after; State state = State.NEW; - public Entry(K key, V value, long writeTime) { + Entry(K key, V value, long writeTime) { this.key = key; this.value = value; this.writeTime = this.accessTime = writeTime; diff --git a/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java index 53167686736..85d7eda8363 100644 --- a/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ b/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java @@ -433,7 +433,7 @@ public final class CopyOnWriteHashMap extends AbstractMap { private final Deque> entries; private final Deque> nodes; - public EntryIterator(Node node) { + EntryIterator(Node node) { entries = new ArrayDeque<>(); nodes = new ArrayDeque<>(); node.visit(entries, nodes); diff --git a/core/src/main/java/org/elasticsearch/common/collect/Iterators.java b/core/src/main/java/org/elasticsearch/common/collect/Iterators.java index d44bf7341c4..a8c811a0d06 100644 --- a/core/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/core/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -36,7 +36,7 @@ public class Iterators { private final Iterator[] iterators; private int index = 0; - public ConcatenatedIterator(Iterator... iterators) { + ConcatenatedIterator(Iterator... iterators) { if (iterators == null) { throw new NullPointerException("iterators"); } diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java index 067d4666722..a355a12d672 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java +++ b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Objects; public class CompressorFactory { @@ -68,9 +69,10 @@ public class CompressorFactory { /** * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(BytesReference)}. 
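The uncompressIfNeeded change just below turns a latent NullPointerException deep in compressor detection into a fail-fast one with a clear message. A usage sketch, assuming plain JSON bytes pass the x-content detection that compressor detection relies on:

-------------------------------------------------
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressorFactory;

class UncompressDemo {
    public static void main(String[] args) throws Exception {
        BytesReference plainJson = new BytesArray("{\"foo\":\"bar\"}");
        // not compressed, so returned as-is
        BytesReference result = CompressorFactory.uncompressIfNeeded(plainJson);
        System.out.println(result.utf8ToString());
        // CompressorFactory.uncompressIfNeeded(null) now throws
        // NullPointerException("the BytesReference must not be null")
    }
}
-------------------------------------------------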
+ * @throws NullPointerException a NullPointerException will be thrown when bytes is null */ public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException { - Compressor compressor = compressor(bytes); + Compressor compressor = compressor(Objects.requireNonNull(bytes, "the BytesReference must not be null")); BytesReference uncompressed; if (compressor != null) { uncompressed = uncompress(bytes, compressor); diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoDistance.java b/core/src/main/java/org/elasticsearch/common/geo/GeoDistance.java index f63636174f6..b1767e73103 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoDistance.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoDistance.java @@ -19,18 +19,10 @@ package org.elasticsearch.common.geo; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.SloppyMath; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.index.fielddata.FieldData; -import org.elasticsearch.index.fielddata.GeoPointValues; -import org.elasticsearch.index.fielddata.MultiGeoPointValues; -import org.elasticsearch.index.fielddata.NumericDoubleValues; -import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import java.io.IOException; import java.util.Locale; @@ -39,101 +31,9 @@ import java.util.Locale; * Geo distance calculation. */ public enum GeoDistance implements Writeable { - /** - * Calculates distance as points on a plane. Faster, but less accurate than {@link #ARC}. - * @deprecated use {@link GeoUtils#planeDistance} - */ - @Deprecated - PLANE { - @Override - public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) { - double px = targetLongitude - sourceLongitude; - double py = targetLatitude - sourceLatitude; - return Math.sqrt(px * px + py * py) * unit.getDistancePerDegree(); - } - - @Override - public double normalize(double distance, DistanceUnit unit) { - return distance; - } - - @Override - public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - return new PlaneFixedSourceDistance(sourceLatitude, sourceLongitude, unit); - } - }, - - /** - * Calculates distance factor. - * Note: {@code calculate} is simply returning the RHS of the spherical law of cosines from 2 lat,lon points. 
- * {@code normalize} also returns the RHS of the spherical law of cosines for a given distance - * @deprecated use {@link SloppyMath#haversinMeters} to get distance in meters, law of cosines is being removed - */ - @Deprecated - FACTOR { - @Override - public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) { - double longitudeDifference = targetLongitude - sourceLongitude; - double a = Math.toRadians(90D - sourceLatitude); - double c = Math.toRadians(90D - targetLatitude); - return (Math.cos(a) * Math.cos(c)) + (Math.sin(a) * Math.sin(c) * Math.cos(Math.toRadians(longitudeDifference))); - } - - @Override - public double normalize(double distance, DistanceUnit unit) { - return Math.cos(distance / unit.getEarthRadius()); - } - - @Override - public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude); - } - }, - /** - * Calculates distance as points on a globe. - * @deprecated use {@link GeoUtils#arcDistance} - */ - @Deprecated - ARC { - @Override - public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) { - double result = SloppyMath.haversinMeters(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude); - return unit.fromMeters(result); - } - - @Override - public double normalize(double distance, DistanceUnit unit) { - return distance; - } - - @Override - public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - return new ArcFixedSourceDistance(sourceLatitude, sourceLongitude, unit); - } - }, - /** - * Calculates distance as points on a globe in a sloppy way. Close to the pole areas the accuracy - * of this function decreases. - */ - @Deprecated - SLOPPY_ARC { - - @Override - public double normalize(double distance, DistanceUnit unit) { - return distance; - } - - @Override - public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) { - return unit.fromMeters(SloppyMath.haversinMeters(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude)); - } - - @Override - public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - return new SloppyArcFixedSourceDistance(sourceLatitude, sourceLongitude, unit); - } - }; + PLANE, ARC; + /** Creates a GeoDistance instance from an input stream */ public static GeoDistance readFromStream(StreamInput in) throws IOException { int ord = in.readVInt(); if (ord < 0 || ord >= values().length) { @@ -142,70 +42,17 @@ public enum GeoDistance implements Writeable { return GeoDistance.values()[ord]; } + /** Writes an instance of a GeoDistance object to an output stream */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(this.ordinal()); } - /** - * Default {@link GeoDistance} function. This method should be used, If no specific function has been selected. 
- * This is an alias for SLOPPY_ARC - */ - @Deprecated - public static final GeoDistance DEFAULT = SLOPPY_ARC; - - public abstract double normalize(double distance, DistanceUnit unit); - - public abstract double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit); - - public abstract FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit); - - private static final double MIN_LAT = Math.toRadians(-90d); // -PI/2 - private static final double MAX_LAT = Math.toRadians(90d); // PI/2 - private static final double MIN_LON = Math.toRadians(-180d); // -PI - private static final double MAX_LON = Math.toRadians(180d); // PI - - public static DistanceBoundingCheck distanceBoundingCheck(double sourceLatitude, double sourceLongitude, double distance, DistanceUnit unit) { - // angular distance in radians on a great circle - // assume worst-case: use the minor axis - double radDist = unit.toMeters(distance) / GeoUtils.EARTH_SEMI_MINOR_AXIS; - - double radLat = Math.toRadians(sourceLatitude); - double radLon = Math.toRadians(sourceLongitude); - - double minLat = radLat - radDist; - double maxLat = radLat + radDist; - - double minLon, maxLon; - if (minLat > MIN_LAT && maxLat < MAX_LAT) { - double deltaLon = Math.asin(Math.sin(radDist) / Math.cos(radLat)); - minLon = radLon - deltaLon; - if (minLon < MIN_LON) minLon += 2d * Math.PI; - maxLon = radLon + deltaLon; - if (maxLon > MAX_LON) maxLon -= 2d * Math.PI; - } else { - // a pole is within the distance - minLat = Math.max(minLat, MIN_LAT); - maxLat = Math.min(maxLat, MAX_LAT); - minLon = MIN_LON; - maxLon = MAX_LON; - } - - GeoPoint topLeft = new GeoPoint(Math.toDegrees(maxLat), Math.toDegrees(minLon)); - GeoPoint bottomRight = new GeoPoint(Math.toDegrees(minLat), Math.toDegrees(maxLon)); - if (minLon > maxLon) { - return new Meridian180DistanceBoundingCheck(topLeft, bottomRight); - } - return new SimpleDistanceBoundingCheck(topLeft, bottomRight); - } - /** * Get a {@link GeoDistance} according to a given name. Valid values are * *
<ul> * <li> plane for GeoDistance.PLANE </li> - * <li> sloppy_arc for GeoDistance.SLOPPY_ARC </li> - * <li> factor for GeoDistance.FACTOR </li> * <li> arc for GeoDistance.ARC </li> * </ul>
    * @@ -218,222 +65,16 @@ public enum GeoDistance implements Writeable { return PLANE; } else if ("arc".equals(name)) { return ARC; - } else if ("sloppy_arc".equals(name)) { - return SLOPPY_ARC; - } else if ("factor".equals(name)) { - return FACTOR; } throw new IllegalArgumentException("No geo distance for [" + name + "]"); } - public interface FixedSourceDistance { - - double calculate(double targetLatitude, double targetLongitude); - } - - public interface DistanceBoundingCheck { - - boolean isWithin(double targetLatitude, double targetLongitude); - - GeoPoint topLeft(); - - GeoPoint bottomRight(); - } - - public static final AlwaysDistanceBoundingCheck ALWAYS_INSTANCE = new AlwaysDistanceBoundingCheck(); - - private static class AlwaysDistanceBoundingCheck implements DistanceBoundingCheck { - @Override - public boolean isWithin(double targetLatitude, double targetLongitude) { - return true; - } - - @Override - public GeoPoint topLeft() { - return null; - } - - @Override - public GeoPoint bottomRight() { - return null; - } - } - - public static class Meridian180DistanceBoundingCheck implements DistanceBoundingCheck { - - private final GeoPoint topLeft; - private final GeoPoint bottomRight; - - public Meridian180DistanceBoundingCheck(GeoPoint topLeft, GeoPoint bottomRight) { - this.topLeft = topLeft; - this.bottomRight = bottomRight; - } - - @Override - public boolean isWithin(double targetLatitude, double targetLongitude) { - return (targetLatitude >= bottomRight.lat() && targetLatitude <= topLeft.lat()) && - (targetLongitude >= topLeft.lon() || targetLongitude <= bottomRight.lon()); - } - - @Override - public GeoPoint topLeft() { - return topLeft; - } - - @Override - public GeoPoint bottomRight() { - return bottomRight; - } - } - - public static class SimpleDistanceBoundingCheck implements DistanceBoundingCheck { - private final GeoPoint topLeft; - private final GeoPoint bottomRight; - - public SimpleDistanceBoundingCheck(GeoPoint topLeft, GeoPoint bottomRight) { - this.topLeft = topLeft; - this.bottomRight = bottomRight; - } - - @Override - public boolean isWithin(double targetLatitude, double targetLongitude) { - return (targetLatitude >= bottomRight.lat() && targetLatitude <= topLeft.lat()) && - (targetLongitude >= topLeft.lon() && targetLongitude <= bottomRight.lon()); - } - - @Override - public GeoPoint topLeft() { - return topLeft; - } - - @Override - public GeoPoint bottomRight() { - return bottomRight; - } - } - - public static class PlaneFixedSourceDistance implements FixedSourceDistance { - - private final double sourceLatitude; - private final double sourceLongitude; - private final double distancePerDegree; - - public PlaneFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - this.sourceLatitude = sourceLatitude; - this.sourceLongitude = sourceLongitude; - this.distancePerDegree = unit.getDistancePerDegree(); - } - - @Override - public double calculate(double targetLatitude, double targetLongitude) { - double px = targetLongitude - sourceLongitude; - double py = targetLatitude - sourceLatitude; - return Math.sqrt(px * px + py * py) * distancePerDegree; - } - } - - public static class FactorFixedSourceDistance implements FixedSourceDistance { - - private final double sourceLongitude; - - private final double a; - private final double sinA; - private final double cosA; - - public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude) { - this.sourceLongitude = sourceLongitude; - this.a = Math.toRadians(90D - 
sourceLatitude); - this.sinA = Math.sin(a); - this.cosA = Math.cos(a); - } - - @Override - public double calculate(double targetLatitude, double targetLongitude) { - double longitudeDifference = targetLongitude - sourceLongitude; - double c = Math.toRadians(90D - targetLatitude); - return (cosA * Math.cos(c)) + (sinA * Math.sin(c) * Math.cos(Math.toRadians(longitudeDifference))); - } - } - - /** - * Basic implementation of {@link FixedSourceDistance}. This class keeps the basic parameters for a distance - * functions based on a fixed source. Namely latitude, longitude and unit. - */ - public abstract static class FixedSourceDistanceBase implements FixedSourceDistance { - protected final double sourceLatitude; - protected final double sourceLongitude; - protected final DistanceUnit unit; - - public FixedSourceDistanceBase(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - this.sourceLatitude = sourceLatitude; - this.sourceLongitude = sourceLongitude; - this.unit = unit; - } - } - - public static class ArcFixedSourceDistance extends FixedSourceDistanceBase { - - public ArcFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - super(sourceLatitude, sourceLongitude, unit); - } - - @Override - public double calculate(double targetLatitude, double targetLongitude) { - return ARC.calculate(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude, unit); - } - - } - - public static class SloppyArcFixedSourceDistance extends FixedSourceDistanceBase { - - public SloppyArcFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) { - super(sourceLatitude, sourceLongitude, unit); - } - - @Override - public double calculate(double targetLatitude, double targetLongitude) { - return SLOPPY_ARC.calculate(sourceLatitude, sourceLongitude, targetLatitude, targetLongitude, unit); - } - } - - - /** - * Return a {@link SortedNumericDoubleValues} instance that returns the distances to a list of geo-points for each document. - */ - public static SortedNumericDoubleValues distanceValues(final MultiGeoPointValues geoPointValues, final FixedSourceDistance... 
distances) { - final GeoPointValues singleValues = FieldData.unwrapSingleton(geoPointValues); - if (singleValues != null && distances.length == 1) { - final Bits docsWithField = FieldData.unwrapSingletonBits(geoPointValues); - return FieldData.singleton(new NumericDoubleValues() { - - @Override - public double get(int docID) { - if (docsWithField != null && !docsWithField.get(docID)) { - return 0d; - } - final GeoPoint point = singleValues.get(docID); - return distances[0].calculate(point.lat(), point.lon()); - } - - }, docsWithField); - } else { - return new SortingNumericDoubleValues() { - - @Override - public void setDocument(int doc) { - geoPointValues.setDocument(doc); - resize(geoPointValues.count() * distances.length); - int valueCounter = 0; - for (FixedSourceDistance distance : distances) { - for (int i = 0; i < geoPointValues.count(); ++i) { - final GeoPoint point = geoPointValues.valueAt(i); - values[valueCounter] = distance.calculate(point.lat(), point.lon()); - valueCounter++; - } - } - sort(); - } - }; + /** compute the distance between two points using the selected algorithm (PLANE, ARC) */ + public double calculate(double srcLat, double srcLon, double dstLat, double dstLon, DistanceUnit unit) { + if (this == PLANE) { + return DistanceUnit.convert(GeoUtils.planeDistance(srcLat, srcLon, dstLat, dstLon), + DistanceUnit.METERS, unit); } + return DistanceUnit.convert(GeoUtils.arcDistance(srcLat, srcLon, dstLat, dstLon), DistanceUnit.METERS, unit); } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 2046b1a6e14..6e4033efd4c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -19,13 +19,21 @@ package org.elasticsearch.common.geo; +import org.apache.lucene.geo.Rectangle; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.GeoPointValues; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; +import org.elasticsearch.index.fielddata.NumericDoubleValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import java.io.IOException; @@ -65,14 +73,6 @@ public class GeoUtils { /** rounding error for quantized latitude and longitude values */ public static final double TOLERANCE = 1E-6; - /** Returns the minimum between the provided distance 'initialRadius' and the - * maximum distance/radius from the point 'center' before overlapping - **/ - public static double maxRadialDistance(GeoPoint center, double initialRadius) { - final double maxRadius = maxRadialDistanceMeters(center.lat(), center.lon()); - return Math.min(initialRadius, maxRadius); - } - /** Returns true if latitude is actually a valid latitude value.*/ public static boolean isValidLatitude(double latitude) { if (Double.isNaN(latitude) || Double.isInfinite(latitude) || latitude < GeoUtils.MIN_LAT || latitude > GeoUtils.MAX_LAT) { @@ -481,7 +481,8 @@ public class GeoUtils { /** * 
Return the distance (in meters) between 2 lat,lon geo points using a simple tangential plane - * this provides a faster alternative to {@link GeoUtils#arcDistance} when points are within 5 km + * this provides a faster alternative to {@link GeoUtils#arcDistance} but is inaccurate for distances greater than + * 4 decimal degrees */ public static double planeDistance(double lat1, double lon1, double lat2, double lon2) { double x = (lon2 - lon1) * SloppyMath.TO_RADIANS * Math.cos((lat2 + lat1) / 2.0 * SloppyMath.TO_RADIANS); @@ -489,6 +490,61 @@ public class GeoUtils { return Math.sqrt(x * x + y * y) * EARTH_MEAN_RADIUS; } + /** check if point is within a rectangle + * todo: move this to lucene Rectangle class + */ + public static boolean rectangleContainsPoint(Rectangle r, double lat, double lon) { + if (lat >= r.minLat && lat <= r.maxLat) { + // if rectangle crosses the dateline we only check if the lon is >= min or max + return r.crossesDateline() ? lon >= r.minLon || lon <= r.maxLon : lon >= r.minLon && lon <= r.maxLon; + } + return false; + } + + /** + * Return a {@link SortedNumericDoubleValues} instance that returns the distances to a list of geo-points + * for each document. + */ + public static SortedNumericDoubleValues distanceValues(final GeoDistance distance, + final DistanceUnit unit, + final MultiGeoPointValues geoPointValues, + final GeoPoint... fromPoints) { + final GeoPointValues singleValues = FieldData.unwrapSingleton(geoPointValues); + if (singleValues != null && fromPoints.length == 1) { + final Bits docsWithField = FieldData.unwrapSingletonBits(geoPointValues); + return FieldData.singleton(new NumericDoubleValues() { + + @Override + public double get(int docID) { + if (docsWithField != null && !docsWithField.get(docID)) { + return 0d; + } + final GeoPoint to = singleValues.get(docID); + final GeoPoint from = fromPoints[0]; + return distance.calculate(from.lat(), from.lon(), to.lat(), to.lon(), unit); + } + + }, docsWithField); + } else { + return new SortingNumericDoubleValues() { + @Override + public void setDocument(int doc) { + geoPointValues.setDocument(doc); + resize(geoPointValues.count() * fromPoints.length); + int v = 0; + for (GeoPoint from : fromPoints) { + for (int i = 0; i < geoPointValues.count(); ++i) { + final GeoPoint point = geoPointValues.valueAt(i); + values[v] = distance.calculate(from.lat(), from.lon(), point.lat(), point.lon(), unit); + v++; + } + } + sort(); + } + }; + } + } + private GeoUtils() { } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java index 5a7c9e2a325..acac5fd6690 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java +++ b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java @@ -31,7 +31,7 @@ public enum SpatialStrategy implements Writeable { private final String strategyName; - private SpatialStrategy(String strategyName) { + SpatialStrategy(String strategyName) { this.strategyName = strategyName; } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index cb2f8bb4e78..fbe47a17826 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -381,7 +381,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri } } - public static enum
Orientation { + public enum Orientation { LEFT, RIGHT; @@ -427,7 +427,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri /** * Enumeration that lists all {@link GeoShapeType}s that can be handled */ - public static enum GeoShapeType { + public enum GeoShapeType { POINT("point"), MULTIPOINT("multipoint"), LINESTRING("linestring"), @@ -440,7 +440,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri private final String shapename; - private GeoShapeType(String shapename) { + GeoShapeType(String shapename) { this.shapename = shapename; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java b/core/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java index 87bf31e911e..aa7029f2a9c 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java +++ b/core/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java @@ -30,7 +30,7 @@ class ConstantFactory implements InternalFactory { private final Initializable initializable; - public ConstantFactory(Initializable initializable) { + ConstantFactory(Initializable initializable) { this.initializable = initializable; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java b/core/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java index 40d589f37dc..2bc8f770bf8 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java +++ b/core/src/main/java/org/elasticsearch/common/inject/DeferredLookups.java @@ -34,7 +34,7 @@ class DeferredLookups implements Lookups { private final InjectorImpl injector; private final List lookups = new ArrayList<>(); - public DeferredLookups(InjectorImpl injector) { + DeferredLookups(InjectorImpl injector) { this.injector = injector; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java index 8b8b7b78218..ed49d28dea7 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java @@ -36,7 +36,7 @@ final class EncounterImpl implements TypeEncounter { private List> injectionListeners; // lazy private boolean valid = true; - public EncounterImpl(Errors errors, Lookups lookups) { + EncounterImpl(Errors errors, Lookups lookups) { this.errors = errors; this.lookups = lookups; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java b/core/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java index efc10b27e49..b3cf0a14b6b 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java +++ b/core/src/main/java/org/elasticsearch/common/inject/ExposedKeyFactory.java @@ -33,7 +33,7 @@ class ExposedKeyFactory implements InternalFactory, BindingProcessor.Creat private final PrivateElements privateElements; private BindingImpl delegate; - public ExposedKeyFactory(Key key, PrivateElements privateElements) { + ExposedKeyFactory(Key key, PrivateElements privateElements) { this.key = key; this.privateElements = privateElements; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/Initializer.java b/core/src/main/java/org/elasticsearch/common/inject/Initializer.java index 1d68f163bfe..ce7d7765ce3 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/Initializer.java +++ b/core/src/main/java/org/elasticsearch/common/inject/Initializer.java @@ -115,7 
+115,7 @@ class Initializer { private final Object source; private MembersInjectorImpl membersInjector; - public InjectableReference(InjectorImpl injector, T instance, Object source) { + InjectableReference(InjectorImpl injector, T instance, Object source) { this.injector = injector; this.instance = Objects.requireNonNull(instance, "instance"); this.source = Objects.requireNonNull(source, "source"); diff --git a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java index e8b38b51330..909779cb442 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java @@ -86,7 +86,7 @@ class InjectionRequestProcessor extends AbstractProcessor { final StaticInjectionRequest request; List memberInjectors; - public StaticInjection(InjectorImpl injector, StaticInjectionRequest request) { + StaticInjection(InjectorImpl injector, StaticInjectionRequest request) { this.injector = injector; this.source = request.getSource(); this.request = request; diff --git a/core/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java b/core/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java index 8739d9182d8..54fecae9ba9 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InternalFactoryToProviderAdapter.java @@ -33,11 +33,11 @@ class InternalFactoryToProviderAdapter implements InternalFactory { private final Initializable> initializable; private final Object source; - public InternalFactoryToProviderAdapter(Initializable> initializable) { + InternalFactoryToProviderAdapter(Initializable> initializable) { this(initializable, SourceProvider.UNKNOWN_SOURCE); } - public InternalFactoryToProviderAdapter( + InternalFactoryToProviderAdapter( Initializable> initializable, Object source) { this.initializable = Objects.requireNonNull(initializable, "provider"); this.source = Objects.requireNonNull(source, "source"); diff --git a/core/src/main/java/org/elasticsearch/common/inject/Key.java b/core/src/main/java/org/elasticsearch/common/inject/Key.java index 83ab440d23c..833aac2d3fc 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/Key.java +++ b/core/src/main/java/org/elasticsearch/common/inject/Key.java @@ -380,7 +380,7 @@ public class Key { } } - static enum NullAnnotationStrategy implements AnnotationStrategy { + enum NullAnnotationStrategy implements AnnotationStrategy { INSTANCE; @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java b/core/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java index d7b6afbe6da..f4437569088 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java +++ b/core/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java @@ -30,7 +30,7 @@ class ProviderToInternalFactoryAdapter implements Provider { private final InjectorImpl injector; private final InternalFactory internalFactory; - public ProviderToInternalFactoryAdapter(InjectorImpl injector, + ProviderToInternalFactoryAdapter(InjectorImpl injector, InternalFactory internalFactory) { this.injector = injector; this.internalFactory = internalFactory; diff --git 
a/core/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java b/core/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java index 10ba17d86cd..ce02a26ffd0 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java +++ b/core/src/main/java/org/elasticsearch/common/inject/SingleFieldInjector.java @@ -34,7 +34,7 @@ class SingleFieldInjector implements SingleMemberInjector { final Dependency dependency; final InternalFactory factory; - public SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) + SingleFieldInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { this.injectionPoint = injectionPoint; this.field = (Field) injectionPoint.getMember(); diff --git a/core/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java b/core/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java index 9c407791160..7330d05df3b 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java +++ b/core/src/main/java/org/elasticsearch/common/inject/SingleMethodInjector.java @@ -34,7 +34,7 @@ class SingleMethodInjector implements SingleMemberInjector { final SingleParameterInjector[] parameterInjectors; final InjectionPoint injectionPoint; - public SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) + SingleMethodInjector(InjectorImpl injector, InjectionPoint injectionPoint, Errors errors) throws ErrorsException { this.injectionPoint = injectionPoint; final Method method = (Method) injectionPoint.getMember(); diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java index 07166537081..edd45c290a5 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java +++ b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/AssistedConstructor.java @@ -43,7 +43,7 @@ class AssistedConstructor { private final List allParameters; @SuppressWarnings("unchecked") - public AssistedConstructor(Constructor constructor, List> parameterTypes) { + AssistedConstructor(Constructor constructor, List> parameterTypes) { this.constructor = constructor; Annotation[][] annotations = constructor.getParameterAnnotations(); diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java index 5ceb086db9f..a21dc3aa7f5 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java +++ b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/Parameter.java @@ -39,7 +39,7 @@ class Parameter { private final Annotation bindingAnnotation; private final boolean isProvider; - public Parameter(Type type, Annotation[] annotations) { + Parameter(Type type, Annotation[] annotations) { this.type = type; this.bindingAnnotation = getBindingAnnotation(annotations); this.isAssisted = hasAssistedAnnotation(annotations); diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java index fc2a96e19df..7967f473948 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java +++ 
b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/ParameterListKey.java @@ -34,11 +34,11 @@ class ParameterListKey { private final List paramList; - public ParameterListKey(List paramList) { + ParameterListKey(List paramList) { this.paramList = new ArrayList<>(paramList); } - public ParameterListKey(Type[] types) { + ParameterListKey(Type[] types) { this(Arrays.asList(types)); } diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java b/core/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java index cc5a23a786d..0fec6b5bac2 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/NullOutputException.java @@ -24,7 +24,7 @@ package org.elasticsearch.common.inject.internal; * @author Bob Lee */ class NullOutputException extends NullPointerException { - public NullOutputException(String s) { + NullOutputException(String s) { super(s); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java b/core/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java index 931d290fc19..76df334e4e3 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java +++ b/core/src/main/java/org/elasticsearch/common/inject/matcher/AbstractMatcher.java @@ -36,7 +36,7 @@ public abstract class AbstractMatcher implements Matcher { private static class AndMatcher extends AbstractMatcher { private final Matcher a, b; - public AndMatcher(Matcher a, Matcher b) { + AndMatcher(Matcher a, Matcher b) { this.a = a; this.b = b; } @@ -67,7 +67,7 @@ public abstract class AbstractMatcher implements Matcher { private static class OrMatcher extends AbstractMatcher { private final Matcher a, b; - public OrMatcher(Matcher a, Matcher b) { + OrMatcher(Matcher a, Matcher b) { this.a = a; this.b = b; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/core/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java index e2ced98034e..cc354145b11 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java +++ b/core/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java @@ -113,7 +113,7 @@ public class Matchers { private static class AnnotatedWithType extends AbstractMatcher { private final Class annotationType; - public AnnotatedWithType(Class annotationType) { + AnnotatedWithType(Class annotationType) { this.annotationType = Objects.requireNonNull(annotationType, "annotation type"); checkForRuntimeRetention(annotationType); } @@ -152,7 +152,7 @@ public class Matchers { private static class AnnotatedWith extends AbstractMatcher { private final Annotation annotation; - public AnnotatedWith(Annotation annotation) { + AnnotatedWith(Annotation annotation) { this.annotation = Objects.requireNonNull(annotation, "annotation"); checkForRuntimeRetention(annotation.annotationType()); } @@ -191,7 +191,7 @@ public class Matchers { private static class SubclassesOf extends AbstractMatcher { private final Class superclass; - public SubclassesOf(Class superclass) { + SubclassesOf(Class superclass) { this.superclass = Objects.requireNonNull(superclass, "superclass"); } @@ -227,7 +227,7 @@ public class Matchers { private static class Only extends AbstractMatcher { private final Object value; - public Only(Object value) { + Only(Object value) { this.value = Objects.requireNonNull(value, "value"); } 
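The inject and matcher hunks above all apply one mechanical cleanup: constructors of package-private or private nested classes drop a redundant public modifier. A minimal, hypothetical sketch of the idiom (the names below are invented for illustration, not code from this PR):

---------------------------------------------------------------------------
// Hypothetical illustration: a private nested helper can never be
// constructed from outside its enclosing file, so a public constructor
// grants no extra access and only adds noise.
public class MatchersSketch {
    private static class Only {
        private final Object value;

        // before: public Only(Object value) { ... }
        // after: default (package-private) visibility is already the
        // widest access the nested class itself allows
        Only(Object value) {
            this.value = java.util.Objects.requireNonNull(value, "value");
        }
    }
}
---------------------------------------------------------------------------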
@@ -263,7 +263,7 @@ public class Matchers { private static class IdenticalTo extends AbstractMatcher { private final Object value; - public IdenticalTo(Object value) { + IdenticalTo(Object value) { this.value = Objects.requireNonNull(value, "value"); } @@ -301,7 +301,7 @@ public class Matchers { private final transient Package targetPackage; private final String packageName; - public InPackage(Package targetPackage) { + InPackage(Package targetPackage) { this.targetPackage = Objects.requireNonNull(targetPackage, "package"); this.packageName = targetPackage.getName(); } @@ -345,7 +345,7 @@ public class Matchers { private static class InSubpackage extends AbstractMatcher { private final String targetPackageName; - public InSubpackage(String targetPackageName) { + InSubpackage(String targetPackageName) { this.targetPackageName = targetPackageName; } @@ -384,7 +384,7 @@ public class Matchers { private static class Returns extends AbstractMatcher { private final Matcher> returnType; - public Returns(Matcher> returnType) { + Returns(Matcher> returnType) { this.returnType = Objects.requireNonNull(returnType, "return type matcher"); } diff --git a/core/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java b/core/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java index e4cc088e30a..eb3fb00a51a 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/name/NamedImpl.java @@ -23,7 +23,7 @@ class NamedImpl implements Named { private final String value; - public NamedImpl(String value) { + NamedImpl(String value) { this.value = Objects.requireNonNull(value, "name"); } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java index 55da169e620..5db80a711ee 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java @@ -36,14 +36,20 @@ public class NamedWriteableAwareStreamInput extends FilterStreamInput { @Override public C readNamedWriteable(Class categoryClass) throws IOException { String name = readString(); + return readNamedWriteable(categoryClass, name); + } + + @Override + public C readNamedWriteable(@SuppressWarnings("unused") Class categoryClass, + @SuppressWarnings("unused") String name) throws IOException { Writeable.Reader reader = namedWriteableRegistry.getReader(categoryClass, name); C c = reader.read(this); if (c == null) { throw new IOException( - "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream."); + "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream."); } assert name.equals(c.getWriteableName()) : c + " claims to have a different name [" + c.getWriteableName() - + "] than it was read from [" + name + "]."; + + "] than it was read from [" + name + "]."; return c; } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NotSerializableExceptionWrapper.java b/core/src/main/java/org/elasticsearch/common/io/stream/NotSerializableExceptionWrapper.java index b25d4bc9b72..fd4a215eabf 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/NotSerializableExceptionWrapper.java +++ 
b/core/src/main/java/org/elasticsearch/common/io/stream/NotSerializableExceptionWrapper.java @@ -38,8 +38,7 @@ public final class NotSerializableExceptionWrapper extends ElasticsearchExceptio private final RestStatus status; public NotSerializableExceptionWrapper(Throwable other) { - super(ElasticsearchException.getExceptionName(other) + - ": " + other.getMessage(), other.getCause()); + super(ElasticsearchException.getExceptionName(other) + ": " + other.getMessage(), other.getCause()); this.name = ElasticsearchException.getExceptionName(other); this.status = ExceptionsHelper.status(other); setStackTrace(other.getStackTrace()); @@ -51,6 +50,9 @@ public final class NotSerializableExceptionWrapper extends ElasticsearchExceptio for (String key : ex.getHeaderKeys()) { this.addHeader(key, ex.getHeader(key)); } + for (String key : ex.getMetadataKeys()) { + this.addMetadata(key, ex.getMetadata(key)); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index f7a8a2ff1a6..e33c3ed840a 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -214,9 +214,8 @@ public abstract class StreamInput extends InputStream { } /** - * Reads a long stored in variable-length format. Reads between one and - * nine bytes. Smaller values take fewer bytes. Negative numbers are not - * supported. + * Reads a long stored in variable-length format. Reads between one and ten bytes. Smaller values take fewer bytes. Negative numbers + * are encoded in ten bytes so prefer {@link #readLong()} or {@link #readZLong()} for negative numbers. */ public long readVLong() throws IOException { byte b = readByte(); @@ -260,8 +259,16 @@ public abstract class StreamInput extends InputStream { return i; } b = readByte(); - assert (b & 0x80) == 0; - return i | ((b & 0x7FL) << 56); + i |= ((b & 0x7FL) << 56); + if ((b & 0x80) == 0) { + return i; + } + b = readByte(); + if (b != 0 && b != 1) { + throw new IOException("Invalid vlong (" + Integer.toHexString(b) + " << 63) | " + Long.toHexString(i)); + } + i |= ((long) b) << 63; + return i; } public long readZLong() throws IOException { @@ -818,6 +825,22 @@ public abstract class StreamInput extends InputStream { throw new UnsupportedOperationException("can't read named writeable from StreamInput"); } + /** + * Reads a {@link NamedWriteable} from the current stream with the given name. It is assumed that the caller obtained the name + * from another source, so it is not read from the stream. The name is used to look up + * the corresponding entry in the registry, so that the proper object can be read and returned. + * The default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry. + * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too. + * + * Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you + * have a compelling reason to use this method instead. + */ + @Nullable + public C readNamedWriteable(@SuppressWarnings("unused") Class categoryClass, + @SuppressWarnings("unused") String name) throws IOException { + throw new UnsupportedOperationException("can't read named writeable from StreamInput"); + } + /** * Reads an optional {@link NamedWriteable}.
*/ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 4fc253cf45d..4d57e7c1b88 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -210,12 +210,22 @@ public abstract class StreamOutput extends OutputStream { } /** - * Writes a non-negative long in a variable-length format. - * Writes between one and nine bytes. Smaller values take fewer bytes. - * Negative numbers are not supported. + * Writes a non-negative long in a variable-length format. Writes between one and ten bytes. Smaller values take fewer bytes. Negative + * numbers are rejected with an {@link IllegalStateException} so prefer {@link #writeLong(long)} or {@link #writeZLong(long)} for + * negative numbers. */ public void writeVLong(long i) throws IOException { - assert i >= 0; + if (i < 0) { + throw new IllegalStateException("Negative longs unsupported, use writeLong or writeZLong for negative numbers [" + i + "]"); + } + writeVLongNoCheck(i); + } + + /** + * Writes a long in a variable-length format without first checking if it is negative. Package private for testing. Use + * {@link #writeVLong(long)} instead. + */ + void writeVLongNoCheck(long i) throws IOException { while ((i & ~0x7F) != 0) { writeByte((byte) ((i & 0x7f) | 0x80)); i >>>= 7; @@ -328,7 +338,7 @@ public abstract class StreamOutput extends OutputStream { // make sure any possible char can fit into the buffer in any possible iteration // we need at most 3 bytes so we flush the buffer once we have less than 3 bytes // left before we start another iteration - if (offset > buffer.length-3) { + if (offset > buffer.length - 3) { writeBytes(buffer, offset); offset = 0; } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java index 30607f33759..9d645038d65 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -34,7 +34,7 @@ public interface Writeable { /** * Write this into the {@linkplain StreamOutput}. */ - void writeTo(final StreamOutput out) throws IOException; + void writeTo(StreamOutput out) throws IOException; /** * Reference to a method that can write some object to a {@link StreamOutput}.
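As background for the writeVLong and readVLong changes above: a vlong stores seven payload bits per byte with the high bit as a continuation flag, so nine bytes cover only 63 bits and the sign bit of a negative long spills into a tenth byte. A self-contained sketch under that assumption (the class and method names are invented for illustration, not Elasticsearch code):

---------------------------------------------------------------------------
import java.io.ByteArrayOutputStream;

// Illustrative sketch: encode a long as 7-bit groups, least significant
// group first, continuation bit set on every byte except the last.
public class VLongSketch {
    static byte[] encode(long i) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((i & ~0x7FL) != 0) {
            out.write((int) ((i & 0x7F) | 0x80)); // 7 payload bits + continuation bit
            i >>>= 7;                             // unsigned shift also handles negatives
        }
        out.write((int) i);                       // final byte, high bit clear
        return out.toByteArray();
    }

    public static void main(String[] args) {
        System.out.println(encode(127L).length); // 1 byte
        System.out.println(encode(128L).length); // 2 bytes
        // every negative long has bit 63 set, so it always spills into a
        // tenth byte, and that byte can only hold 0 or 1 -- the exact
        // invariant the new readVLong code enforces
        System.out.println(encode(-1L).length);  // 10 bytes
    }
}
---------------------------------------------------------------------------

This is why the updated javadoc quotes a range of one to ten bytes rather than one to nine.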
@@ -60,7 +60,7 @@ public interface Writeable { * @param out Output to write the {@code value} too * @param value The value to add */ - void write(final StreamOutput out, final V value) throws IOException; + void write(StreamOutput out, V value) throws IOException; } @@ -86,7 +86,7 @@ public interface Writeable { * * @param in Input to read the value from */ - V read(final StreamInput in) throws IOException; + V read(StreamInput in) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index ace0569a14a..39b882f1a1d 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -21,11 +21,11 @@ package org.elasticsearch.common.logging; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.util.concurrent.ThreadContext; import java.util.Iterator; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; @@ -42,7 +42,7 @@ public class DeprecationLogger { * * https://tools.ietf.org/html/rfc7234#section-5.5 */ - public static final String DEPRECATION_HEADER = "Warning"; + public static final String WARNING_HEADER = "Warning"; /** * This is set once by the {@code Node} constructor, but it uses {@link CopyOnWriteArraySet} to ensure that tests can run in parallel. @@ -64,7 +64,7 @@ public class DeprecationLogger { * @throws IllegalStateException if this {@code threadContext} has already been set */ public static void setThreadContext(ThreadContext threadContext) { - assert threadContext != null; + Objects.requireNonNull(threadContext, "Cannot register a null ThreadContext"); // add returning false means it _did_ have it already if (THREAD_CONTEXT.add(threadContext) == false) { @@ -128,7 +128,7 @@ public class DeprecationLogger { while (iterator.hasNext()) { try { - iterator.next().addResponseHeader(DEPRECATION_HEADER, formattedMessage); + iterator.next().addResponseHeader(WARNING_HEADER, formattedMessage); } catch (IllegalStateException e) { // ignored; it should be removed shortly } diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index 548c1da5a8c..16f47f78ddb 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -38,8 +38,8 @@ public final class ESLoggerFactory { public static final Setting LOG_DEFAULT_LEVEL_SETTING = new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope); public static final Setting LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", Level.INFO.name(), Level::valueOf, - Property.Dynamic, Property.NodeScope); + Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic, + Property.NodeScope)); public static Logger getLogger(String prefix, String name) { return getLogger(prefix, LogManager.getLogger(name)); diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 428e3ce7964..5e20b6c37e3 100644 --- 
a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.node.Node; import java.io.IOException; import java.nio.file.FileVisitOption; @@ -97,7 +98,7 @@ public class LogConfigurator { final Set options = EnumSet.of(FileVisitOption.FOLLOW_LINKS); Files.walkFileTree(configsPath, options, Integer.MAX_VALUE, new SimpleFileVisitor() { @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals("log4j2.properties")) { configurations.add((PropertiesConfiguration) factory.getConfiguration(context, file.toString(), file.toUri())); } @@ -122,23 +123,53 @@ public class LogConfigurator { Configurator.initialize(builder.build()); } - private static void configureLoggerLevels(Settings settings) { + /** + * Configures the logging levels for loggers configured in the specified settings. + * + * @param settings the settings from which logger levels will be extracted + */ + private static void configureLoggerLevels(final Settings settings) { if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings); Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); } final Map levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap(); - for (String key : levels.keySet()) { - final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings); - Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level); + for (final String key : levels.keySet()) { + // do not set a log level for a logger named level (from the default log setting) + if (!key.equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey())) { + final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings); + Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level); + } } } - + /** + * Set system properties that can be used in configuration files to specify paths and file patterns for log files. We expose three + * properties here: + *
<ul> + * <li> {@code es.logs.base_path} the base path containing the log files </li> + * <li> {@code es.logs.cluster_name} the cluster name, used as the prefix of log filenames in the default configuration </li> + * <li> {@code es.logs.node_name} the node name, can be used as part of log filenames (only exposed if {@link Node#NODE_NAME_SETTING} is + * explicitly set) </li> + * </ul>
    + * + * @param logsPath the path to the log files + * @param settings the settings to extract the cluster and node names + */ @SuppressForbidden(reason = "sets system property for logging configuration") private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) { - System.setProperty("es.logs", logsPath.resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString()); + System.setProperty("es.logs.base_path", logsPath.toString()); + System.setProperty("es.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value()); + if (Node.NODE_NAME_SETTING.exists(settings)) { + System.setProperty("es.logs.node_name", Node.NODE_NAME_SETTING.get(settings)); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index e3d3bf1c624..2a539a84981 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -56,6 +56,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -280,31 +281,8 @@ public class Lucene { } public static TopDocs readTopDocs(StreamInput in) throws IOException { - if (in.readBoolean()) { - int totalHits = in.readVInt(); - float maxScore = in.readFloat(); - - SortField[] fields = new SortField[in.readVInt()]; - for (int i = 0; i < fields.length; i++) { - String field = null; - if (in.readBoolean()) { - field = in.readString(); - } - SortField.Type sortType = readSortType(in); - Object missingValue = readMissingValue(in); - boolean reverse = in.readBoolean(); - fields[i] = new SortField(field, sortType, reverse); - if (missingValue != null) { - fields[i].setMissingValue(missingValue); - } - } - - FieldDoc[] fieldDocs = new FieldDoc[in.readVInt()]; - for (int i = 0; i < fieldDocs.length; i++) { - fieldDocs[i] = readFieldDoc(in); - } - return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore); - } else { + byte type = in.readByte(); + if (type == 0) { int totalHits = in.readVInt(); float maxScore = in.readFloat(); @@ -313,6 +291,39 @@ public class Lucene { scoreDocs[i] = new ScoreDoc(in.readVInt(), in.readFloat()); } return new TopDocs(totalHits, scoreDocs, maxScore); + } else if (type == 1) { + int totalHits = in.readVInt(); + float maxScore = in.readFloat(); + + SortField[] fields = new SortField[in.readVInt()]; + for (int i = 0; i < fields.length; i++) { + fields[i] = readSortField(in); + } + + FieldDoc[] fieldDocs = new FieldDoc[in.readVInt()]; + for (int i = 0; i < fieldDocs.length; i++) { + fieldDocs[i] = readFieldDoc(in); + } + return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore); + } else if (type == 2) { + int totalHits = in.readVInt(); + float maxScore = in.readFloat(); + + String field = in.readString(); + SortField[] fields = new SortField[in.readVInt()]; + for (int i = 0; i < fields.length; i++) { + fields[i] = readSortField(in); + } + int size = in.readVInt(); + Object[] collapseValues = new Object[size]; + FieldDoc[] fieldDocs = new FieldDoc[size]; + for (int i = 0; i < fieldDocs.length; i++) { + fieldDocs[i] = readFieldDoc(in); + collapseValues[i] = readSortValue(in); + } + return new 
CollapseTopFieldDocs(field, totalHits, fieldDocs, fields, collapseValues, maxScore); + } else { + throw new IllegalStateException("Unknown type " + type); } } @@ -347,6 +358,33 @@ public class Lucene { return new FieldDoc(in.readVInt(), in.readFloat(), cFields); } + private static Comparable readSortValue(StreamInput in) throws IOException { + byte type = in.readByte(); + if (type == 0) { + return null; + } else if (type == 1) { + return in.readString(); + } else if (type == 2) { + return in.readInt(); + } else if (type == 3) { + return in.readLong(); + } else if (type == 4) { + return in.readFloat(); + } else if (type == 5) { + return in.readDouble(); + } else if (type == 6) { + return in.readByte(); + } else if (type == 7) { + return in.readShort(); + } else if (type == 8) { + return in.readBoolean(); + } else if (type == 9) { + return in.readBytesRef(); + } else { + throw new IOException("Can't match type [" + type + "]"); + } + } + public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { return new ScoreDoc(in.readVInt(), in.readFloat()); } @@ -354,8 +392,28 @@ public class Lucene { private static final Class GEO_DISTANCE_SORT_TYPE_CLASS = LatLonDocValuesField.newDistanceSort("some_geo_field", 0, 0).getClass(); public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOException { - if (topDocs instanceof TopFieldDocs) { - out.writeBoolean(true); + if (topDocs instanceof CollapseTopFieldDocs) { + out.writeByte((byte) 2); + CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs; + + out.writeVInt(topDocs.totalHits); + out.writeFloat(topDocs.getMaxScore()); + + out.writeString(collapseDocs.field); + + out.writeVInt(collapseDocs.fields.length); + for (SortField sortField : collapseDocs.fields) { + writeSortField(out, sortField); + } + + out.writeVInt(topDocs.scoreDocs.length); + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + ScoreDoc doc = collapseDocs.scoreDocs[i]; + writeFieldDoc(out, (FieldDoc) doc); + writeSortValue(out, collapseDocs.collapseValues[i]); + } + } else if (topDocs instanceof TopFieldDocs) { + out.writeByte((byte) 1); TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs; out.writeVInt(topDocs.totalHits); @@ -363,31 +421,7 @@ public class Lucene { out.writeVInt(topFieldDocs.fields.length); for (SortField sortField : topFieldDocs.fields) { - if (sortField.getClass() == GEO_DISTANCE_SORT_TYPE_CLASS) { - // for geo sorting, we replace the SortField with a SortField that assumes a double field. 
- // this works since the SortField is only used for merging top docs - SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE); - newSortField.setMissingValue(sortField.getMissingValue()); - sortField = newSortField; - } - if (sortField.getClass() != SortField.class) { - throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]"); - } - if (sortField.getField() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(sortField.getField()); - } - if (sortField.getComparatorSource() != null) { - IndexFieldData.XFieldComparatorSource comparatorSource = (IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource(); - writeSortType(out, comparatorSource.reducedType()); - writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse())); - } else { - writeSortType(out, sortField.getType()); - writeMissingValue(out, sortField.getMissingValue()); - } - out.writeBoolean(sortField.getReverse()); + writeSortField(out, sortField); } out.writeVInt(topDocs.scoreDocs.length); @@ -395,7 +429,7 @@ public class Lucene { writeFieldDoc(out, (FieldDoc) doc); } } else { - out.writeBoolean(false); + out.writeByte((byte) 0); out.writeVInt(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); @@ -431,44 +465,49 @@ public class Lucene { } } + + private static void writeSortValue(StreamOutput out, Object field) throws IOException { + if (field == null) { + out.writeByte((byte) 0); + } else { + Class type = field.getClass(); + if (type == String.class) { + out.writeByte((byte) 1); + out.writeString((String) field); + } else if (type == Integer.class) { + out.writeByte((byte) 2); + out.writeInt((Integer) field); + } else if (type == Long.class) { + out.writeByte((byte) 3); + out.writeLong((Long) field); + } else if (type == Float.class) { + out.writeByte((byte) 4); + out.writeFloat((Float) field); + } else if (type == Double.class) { + out.writeByte((byte) 5); + out.writeDouble((Double) field); + } else if (type == Byte.class) { + out.writeByte((byte) 6); + out.writeByte((Byte) field); + } else if (type == Short.class) { + out.writeByte((byte) 7); + out.writeShort((Short) field); + } else if (type == Boolean.class) { + out.writeByte((byte) 8); + out.writeBoolean((Boolean) field); + } else if (type == BytesRef.class) { + out.writeByte((byte) 9); + out.writeBytesRef((BytesRef) field); + } else { + throw new IOException("Can't handle sort field value of type [" + type + "]"); + } + } + } + public static void writeFieldDoc(StreamOutput out, FieldDoc fieldDoc) throws IOException { out.writeVInt(fieldDoc.fields.length); for (Object field : fieldDoc.fields) { - if (field == null) { - out.writeByte((byte) 0); - } else { - Class type = field.getClass(); - if (type == String.class) { - out.writeByte((byte) 1); - out.writeString((String) field); - } else if (type == Integer.class) { - out.writeByte((byte) 2); - out.writeInt((Integer) field); - } else if (type == Long.class) { - out.writeByte((byte) 3); - out.writeLong((Long) field); - } else if (type == Float.class) { - out.writeByte((byte) 4); - out.writeFloat((Float) field); - } else if (type == Double.class) { - out.writeByte((byte) 5); - out.writeDouble((Double) field); - } else if (type == Byte.class) { - out.writeByte((byte) 6); - out.writeByte((Byte) field); - } else if (type == Short.class) { - out.writeByte((byte) 7); - out.writeShort((Short) field); - } else if (type == Boolean.class) { - out.writeByte((byte) 8); - out.writeBoolean((Boolean) field); 
- } else if (type == BytesRef.class) { - out.writeByte((byte) 9); - out.writeBytesRef((BytesRef) field); - } else { - throw new IOException("Can't handle sort field value of type [" + type + "]"); - } - } + writeSortValue(out, field); } out.writeVInt(fieldDoc.doc); out.writeFloat(fieldDoc.score); @@ -487,10 +526,53 @@ public class Lucene { return SortField.Type.values()[in.readVInt()]; } + public static SortField readSortField(StreamInput in) throws IOException { + String field = null; + if (in.readBoolean()) { + field = in.readString(); + } + SortField.Type sortType = readSortType(in); + Object missingValue = readMissingValue(in); + boolean reverse = in.readBoolean(); + SortField sortField = new SortField(field, sortType, reverse); + if (missingValue != null) { + sortField.setMissingValue(missingValue); + } + return sortField; + } + public static void writeSortType(StreamOutput out, SortField.Type sortType) throws IOException { out.writeVInt(sortType.ordinal()); } + public static void writeSortField(StreamOutput out, SortField sortField) throws IOException { + if (sortField.getClass() == GEO_DISTANCE_SORT_TYPE_CLASS) { + // for geo sorting, we replace the SortField with a SortField that assumes a double field. + // this works since the SortField is only used for merging top docs + SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE); + newSortField.setMissingValue(sortField.getMissingValue()); + sortField = newSortField; + } + if (sortField.getClass() != SortField.class) { + throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]"); + } + if (sortField.getField() == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(sortField.getField()); + } + if (sortField.getComparatorSource() != null) { + IndexFieldData.XFieldComparatorSource comparatorSource = (IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource(); + writeSortType(out, comparatorSource.reducedType()); + writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse())); + } else { + writeSortType(out, sortField.getType()); + writeMissingValue(out, sortField.getMissingValue()); + } + out.writeBoolean(sortField.getReverse()); + } + public static Explanation readExplanation(StreamInput in) throws IOException { boolean match = in.readBoolean(); String description = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java b/core/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java index 146fb7ba05e..38e7691f398 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java @@ -29,9 +29,9 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.IdentityHashMap; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** * A map between segment core cache keys and the shard that these segments @@ -50,7 +50,7 @@ public final class ShardCoreKeyMap { private final Map> indexToCoreKey; public ShardCoreKeyMap() { - coreKeyToShard = new IdentityHashMap<>(); + coreKeyToShard = new ConcurrentHashMap<>(); indexToCoreKey = new HashMap<>(); } @@ -64,9 +64,17 @@ public final class ShardCoreKeyMap { throw new IllegalArgumentException("Could not extract shard id from " + reader); } final Object coreKey = reader.getCoreCacheKey(); + + if 
(coreKeyToShard.containsKey(coreKey)) {
+            // Do this check before entering the synchronized block in order to
+            // avoid taking the mutex if possible (which should happen most of
+            // the time).
+            return;
+        }
+
         final String index = shardId.getIndexName();
         synchronized (this) {
-            if (coreKeyToShard.put(coreKey, shardId) == null) {
+            if (coreKeyToShard.containsKey(coreKey) == false) {
                 Set objects = indexToCoreKey.get(index);
                 if (objects == null) {
                     objects = new HashSet<>();
@@ -90,6 +98,14 @@ public final class ShardCoreKeyMap {
             try {
                 reader.addCoreClosedListener(listener);
                 addedListener = true;
+
+                // Only add the core key to the map as a last operation so that
+                // if another thread sees that the core key is already in the
+                // map (like the check just before this synchronized block),
+                // then it means that the closed listener has already been
+                // registered.
+                ShardId previous = coreKeyToShard.put(coreKey, shardId);
+                assert previous == null;
             } finally {
                 if (false == addedListener) {
                     try {
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
index 269c1c55eec..5307a417e10 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
@@ -32,7 +32,6 @@ import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TermQuery;
@@ -87,21 +86,18 @@ public final class AllTermQuery extends Query {
         if (rewritten != this) {
             return rewritten;
         }
-        boolean fieldExists = false;
         boolean hasPayloads = false;
         for (LeafReaderContext context : reader.leaves()) {
             final Terms terms = context.reader().terms(term.field());
             if (terms != null) {
-                fieldExists = true;
                 if (terms.hasPayloads()) {
                     hasPayloads = true;
                     break;
                 }
             }
         }
-        if (fieldExists == false) {
-            return new MatchNoDocsQuery();
-        }
+        // if the terms do not exist we could return a MatchNoDocsQuery, but this would break the unified highlighter,
+        // which rewrites queries with an empty reader.
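+        // (The unified highlighter rewrites queries against a single-document in-memory reader
+        // in which the field may legitimately have no terms; returning a MatchNoDocsQuery there
+        // would disable highlighting entirely, so we keep rewriting and fall back to a plain
+        // TermQuery just below when no payloads are indexed.)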
if (hasPayloads == false) {
             return new TermQuery(term);
         }
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
index 87bfdacb1c7..a76428e829a 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
@@ -25,6 +25,8 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.Query;
@@ -115,6 +117,20 @@ public class MultiPhrasePrefixQuery extends Query {
         positions.add(position);
     }
 
+    /**
+     * Returns the terms for each position in this phrase.
+     */
+    public Term[][] getTerms() {
+        Term[][] terms = new Term[termArrays.size()][];
+        for (int i = 0; i < termArrays.size(); i++) {
+            terms[i] = new Term[termArrays.get(i).length];
+            for (int j = 0; j < termArrays.get(i).length; j++) {
+                terms[i][j] = termArrays.get(i)[j];
+            }
+        }
+        return terms;
+    }
+
     /**
      * Returns the relative positions of terms in this phrase.
     */
@@ -150,7 +166,12 @@ public class MultiPhrasePrefixQuery extends Query {
             }
         }
         if (terms.isEmpty()) {
-            return Queries.newMatchNoDocsQuery("No terms supplied for " + MultiPhrasePrefixQuery.class.getName());
+            // if the terms do not exist we could return a MatchNoDocsQuery, but this would break the unified highlighter,
+            // which rewrites queries with an empty reader.
+            return new BooleanQuery.Builder()
+                .add(query.build(), BooleanClause.Occur.MUST)
+                .add(Queries.newMatchNoDocsQuery("No terms supplied for " + MultiPhrasePrefixQuery.class.getName()),
+                    BooleanClause.Occur.MUST).build();
         }
         query.add(terms.toArray(Term.class), position);
         return query.build();
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
index 8933b56b124..a29b249375e 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
@@ -20,10 +20,12 @@ package org.elasticsearch.common.lucene.search;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.ExtendedCommonTermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.GraphQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -135,6 +137,33 @@ public class Queries {
         }
     }
 
+    /**
+     * Potentially apply the minimum should match value, if we have a query it can be applied to;
+     * otherwise return the original query.
+     */
+    public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String minimumShouldMatch) {
+        // If the coordination factor is disabled on a boolean query, we don't apply the minimum should match.
+ // This is done to make sure that the minimum_should_match doesn't get applied when there is only one word + // and multiple variations of the same word in the query (synonyms for instance). + if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) { + return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); + } else if (query instanceof ExtendedCommonTermsQuery) { + ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch); + } else if (query instanceof GraphQuery && ((GraphQuery) query).hasBoolean()) { + // we have a graph query that has at least one boolean sub-query + // re-build and set minimum should match value on all boolean queries + List oldQueries = ((GraphQuery) query).getQueries(); + Query[] queries = new Query[oldQueries.size()]; + for (int i = 0; i < queries.length; i++) { + queries[i] = maybeApplyMinimumShouldMatch(oldQueries.get(i), minimumShouldMatch); + } + + return new GraphQuery(queries); + } + + return query; + } + private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)"); private static Pattern spacePattern = Pattern.compile(" "); private static Pattern lessThanPattern = Pattern.compile("<"); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index fd7c8f6c49d..fe12622748e 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -157,7 +156,7 @@ public class FiltersFunctionScoreQuery extends Query { final Weight[] filterWeights; final boolean needsScores; - public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights, boolean needsScores) throws IOException { + CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights, boolean needsScores) throws IOException { super(parent); this.subQueryWeight = subQueryWeight; this.filterWeights = filterWeights; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index 5e94e82021f..61de1ab303f 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -109,7 +109,7 @@ public class FunctionScoreQuery extends Query { final Weight subQueryWeight; final boolean needsScores; - public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, boolean needsScores) throws IOException { + CustomBoostFactorWeight(Query parent, Weight subQueryWeight, boolean needsScores) throws IOException { super(parent); this.subQueryWeight = subQueryWeight; this.needsScores = needsScores; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 47b85250735..bee7087c1d5 100644 --- 
a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -38,7 +38,7 @@ public class ScriptScoreFunction extends ScoreFunction { protected int docid; protected float score; - public CannedScorer() { + CannedScorer() { super(null); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index bfe36b0a060..44c6245a812 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -68,7 +68,7 @@ public class WeightFactorFunction extends ScoreFunction { @Override public boolean needsScores() { - return false; + return scoreFunction.needsScores(); } public Explanation explainWeight() { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java index 67f06c4f8d0..6e4e3e1923d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java @@ -57,7 +57,7 @@ final class PerThreadIDAndVersionLookup { /** * Initialize lookup for the provided segment */ - public PerThreadIDAndVersionLookup(LeafReader reader) throws IOException { + PerThreadIDAndVersionLookup(LeafReader reader) throws IOException { TermsEnum termsEnum = null; NumericDocValues versions = null; diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java index 7fd4cc6d2f3..8ad85150299 100644 --- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java +++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java @@ -34,17 +34,17 @@ import java.util.Locale; /** * Simple class to log {@code ifconfig}-style output at DEBUG logging. 
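 * (Now public: the explicit call is removed from the NetworkService constructor below, so
 * callers outside this package can decide when to trigger the logging.)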
*/ -final class IfConfig { +public final class IfConfig { private static final Logger logger = Loggers.getLogger(IfConfig.class); private static final String INDENT = " "; /** log interface configuration at debug level, if its enabled */ - static void logIfNecessary() { + public static void logIfNecessary() { if (logger.isDebugEnabled()) { try { doLogging(); - } catch (IOException | SecurityException e) { + } catch (IOException e) { logger.warn("unable to gather network information", e); } } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 530ecefd4cf..a8356bfe10f 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -24,17 +24,18 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimary import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry; import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.NetworkPlugin; @@ -46,6 +47,7 @@ import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -75,12 +77,8 @@ public final class NetworkModule { private final Settings settings; private final boolean transportClient; - private static final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry(); private static final List namedWriteables = new ArrayList<>(); - - private final Map> transportFactories = new HashMap<>(); - private final Map> transportHttpFactories = new HashMap<>(); - private final List transportIntercetors = new ArrayList<>(); + private static final List namedXContents = new ArrayList<>(); static { registerAllocationCommand(CancelAllocationCommand::new, CancelAllocationCommand::fromXContent, @@ -98,6 +96,11 @@ public final class NetworkModule { namedWriteables.add( new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new)); } + + private final Map> transportFactories = new HashMap<>(); + private final Map> transportHttpFactories = new HashMap<>(); + private final List transportIntercetors = new 
ArrayList<>(); + /** * Creates a network module that custom networking classes can be plugged into. * @param settings The settings for the node @@ -107,13 +110,14 @@ public final class NetworkModule { BigArrays bigArrays, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { + NamedXContentRegistry xContentRegistry, + NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { this.settings = settings; this.transportClient = transportClient; for (NetworkPlugin plugin : plugins) { if (transportClient == false && HTTP_ENABLED.get(settings)) { Map> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays, - circuitBreakerService, namedWriteableRegistry, networkService); + circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher); for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); } @@ -123,7 +127,8 @@ public final class NetworkModule { for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } - List transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry); + List transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry, + threadPool.getThreadContext()); for (TransportInterceptor interceptor : transportInterceptors) { registerTransportInterceptor(interceptor); } @@ -163,22 +168,19 @@ public final class NetworkModule { * it is the name under which the command's reader is registered. */ private static void registerAllocationCommand(Writeable.Reader reader, - AllocationCommand.Parser parser, ParseField commandName) { - allocationCommandRegistry.register(parser, commandName); - namedWriteables.add(new Entry(AllocationCommand.class, commandName.getPreferredName(), reader)); + CheckedFunction parser, ParseField commandName) { + namedXContents.add(new NamedXContentRegistry.Entry(AllocationCommand.class, commandName, parser)); + namedWriteables.add(new NamedWriteableRegistry.Entry(AllocationCommand.class, commandName.getPreferredName(), reader)); } - /** - * The registry of allocation command parsers. 
- */ - public static AllocationCommandRegistry getAllocationCommandRegistry() { - return allocationCommandRegistry; - } - - public static List getNamedWriteables() { + public static List getNamedWriteables() { return Collections.unmodifiableList(namedWriteables); } + public static List getNamedXContents() { + return Collections.unmodifiableList(namedXContents); + } + public Supplier getHttpServerTransportSupplier() { final String name; if (HTTP_TYPE_SETTING.exists(settings)) { @@ -235,9 +237,10 @@ public final class NetworkModule { @Override public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, TransportRequestHandler actualHandler) { for (TransportInterceptor interceptor : this.transportInterceptors) { - actualHandler = interceptor.interceptHandler(action, executor, actualHandler); + actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler); } return actualHandler; } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index b72acf8064c..a9d3dc4a336 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -60,12 +60,6 @@ public class NetworkService extends AbstractComponent { Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope); public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope); - public static final Setting TCP_BLOCKING = - Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope); - public static final Setting TCP_BLOCKING_SERVER = - Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope); - public static final Setting TCP_BLOCKING_CLIENT = - Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope); public static final Setting TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); } @@ -90,7 +84,6 @@ public class NetworkService extends AbstractComponent { public NetworkService(Settings settings, List customNameResolvers) { super(settings); - IfConfig.logIfNecessary(); this.customNameResolvers = customNameResolvers; } diff --git a/core/src/main/java/org/elasticsearch/common/path/PathTrie.java b/core/src/main/java/org/elasticsearch/common/path/PathTrie.java index c711bfa2a61..44accbf190a 100644 --- a/core/src/main/java/org/elasticsearch/common/path/PathTrie.java +++ b/core/src/main/java/org/elasticsearch/common/path/PathTrie.java @@ -111,7 +111,10 @@ public class PathTrie { // in case the target(last) node already exist but without a value // than the value should be updated. 
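        // The change below replaces a silent assert with a hard IllegalArgumentException, so
        // duplicate path registrations now fail fast even when assertions are disabled.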
if (index == (path.length - 1)) {
-            assert (node.value == null || node.value == value);
+            if (node.value != null) {
+                throw new IllegalArgumentException("Path [" + String.join("/", path) + "] already has a value ["
+                    + node.value + "]");
+            }
             if (node.value == null) {
                 node.value = value;
             }
@@ -190,6 +193,9 @@ public class PathTrie {
     public void insert(String path, T value) {
         String[] strings = path.split(SEPARATOR);
         if (strings.length == 0) {
+            if (rootValue != null) {
+                throw new IllegalArgumentException("Path [/] already has a value [" + rootValue + "]");
+            }
             rootValue = value;
             return;
         }
diff --git a/core/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/core/src/main/java/org/elasticsearch/common/recycler/Recyclers.java
index 5bac8f7bcfd..f84441fbce4 100644
--- a/core/src/main/java/org/elasticsearch/common/recycler/Recyclers.java
+++ b/core/src/main/java/org/elasticsearch/common/recycler/Recyclers.java
@@ -170,7 +170,7 @@ public enum Recyclers {
         }
     }
 
-    final int slot() {
+    int slot() {
         final long id = Thread.currentThread().getId();
         // don't trust Thread.hashCode to have equiprobable low bits
         int slot = (int) BitMixer.mix64(id);
diff --git a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
index 3bf4f460d71..9a3e40ac844 100644
--- a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
+++ b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java
@@ -40,7 +40,7 @@ public enum DateTimeUnit {
     private final byte id;
     private final Function fieldFunction;
 
-    private DateTimeUnit(byte id, Function fieldFunction) {
+    DateTimeUnit(byte id, Function fieldFunction) {
         this.id = id;
         this.fieldFunction = fieldFunction;
     }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index 3622623987b..95333a988cd 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -195,6 +195,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
         addSettingsUpdater(setting.newUpdater(consumer, logger, validator));
     }
 
+    /**
+     * Adds a settings consumer for affix settings. Affix settings have a namespace associated with them that needs to be available to the
+     * consumer in order to be processed correctly.
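+     * For example, with an affix setting whose key follows a pattern like {@code prefix.<namespace>.suffix},
+     * the consumer is invoked with the concrete namespace and the updated value for that namespace.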
+ */ + public synchronized void addAffixUpdateConsumer(Setting.AffixSetting setting, BiConsumer consumer, + BiConsumer validator) { + final Setting registeredSetting = this.complexMatchers.get(setting.getKey()); + if (setting != registeredSetting) { + throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); + } + addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator)); + } + synchronized void addSettingsUpdater(SettingUpdater updater) { this.settingUpdaters.add(updater); } @@ -239,11 +252,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent { */ public final void validate(Settings settings) { List exceptions = new ArrayList<>(); - // we want them sorted for deterministic error messages - SortedMap sortedSettings = new TreeMap<>(settings.getAsMap()); - for (Map.Entry entry : sortedSettings.entrySet()) { + for (String key : settings.getAsMap().keySet()) { // settings iterate in deterministic fashion try { - validate(entry.getKey(), settings); + validate(key, settings); } catch (RuntimeException ex) { exceptions.add(ex); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java new file mode 100644 index 00000000000..741469caa58 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli which adds a string setting. 
+ */ +class AddStringKeyStoreCommand extends EnvironmentAwareCommand { + + private final OptionSpec stdinOption; + private final OptionSpec forceOption; + private final OptionSpec arguments; + + AddStringKeyStoreCommand() { + super("Add a string setting to the keystore"); + this.stdinOption = parser.acceptsAll(Arrays.asList("x", "stdin"), "Read setting value from stdin"); + this.forceOption = parser.acceptsAll(Arrays.asList("f", "force"), "Overwrite existing setting without prompting"); + this.arguments = parser.nonOptions("setting name"); + } + + // pkg private so tests can manipulate + InputStream getStdin() { + return System.in; + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + if (keystore == null) { + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one."); + } + + keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */); + + String setting = arguments.value(options); + if (setting == null) { + throw new UserException(ExitCodes.USAGE, "The setting name can not be null"); + } + if (keystore.getSettings().contains(setting) && options.has(forceOption) == false) { + if (terminal.promptYesNo("Setting " + setting + " already exists. Overwrite?", false) == false) { + terminal.println("Exiting without modifying keystore."); + return; + } + } + + final char[] value; + if (options.has(stdinOption)) { + BufferedReader stdinReader = new BufferedReader(new InputStreamReader(getStdin(), StandardCharsets.UTF_8)); + value = stdinReader.readLine().toCharArray(); + } else { + value = terminal.readSecret("Enter value for " + setting + ": "); + } + + try { + keystore.setString(setting, value); + } catch (IllegalArgumentException e) { + throw new UserException(ExitCodes.DATA_ERROR, "String value must contain only ASCII"); + } + keystore.save(env.configFile()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c9106b8cdba..69adc5878ff 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.search.RemoteClusterService; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; @@ -54,9 +55,9 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.discovery.zen.UnicastZenPing; +import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; @@ -80,7 +81,6 @@ import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.PluginsService; import 
org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.repositories.uri.URLRepository; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -237,6 +237,7 @@ public final class ClusterSettings extends AbstractScopedSettings { HttpTransportSettings.SETTING_CORS_ALLOW_METHODS, HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS, HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, + HttpTransportSettings.SETTING_HTTP_CONTENT_TYPE_REQUIRED, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, @@ -253,6 +254,10 @@ public final class ClusterSettings extends AbstractScopedSettings { SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, + RemoteClusterService.REMOTE_CLUSTERS_SEEDS, + RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER, + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, + RemoteClusterService.REMOTE_NODE_ATTRIBUTE, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, @@ -273,7 +278,6 @@ public final class ClusterSettings extends AbstractScopedSettings { TcpTransport.CONNECTIONS_PER_NODE_STATE, TcpTransport.CONNECTIONS_PER_NODE_PING, TcpTransport.PING_SCHEDULE, - TcpTransport.TCP_BLOCKING_CLIENT, TcpTransport.TCP_CONNECT_TIMEOUT, NetworkService.NETWORK_SERVER, TcpTransport.TCP_NO_DELAY, @@ -281,7 +285,6 @@ public final class ClusterSettings extends AbstractScopedSettings { TcpTransport.TCP_REUSE_ADDRESS, TcpTransport.TCP_SEND_BUFFER_SIZE, TcpTransport.TCP_RECEIVE_BUFFER_SIZE, - TcpTransport.TCP_BLOCKING_SERVER, NetworkService.GLOBAL_NETWORK_HOST_SETTING, NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, @@ -290,9 +293,6 @@ public final class ClusterSettings extends AbstractScopedSettings { NetworkService.TcpSettings.TCP_REUSE_ADDRESS, NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, - NetworkService.TcpSettings.TCP_BLOCKING, - NetworkService.TcpSettings.TCP_BLOCKING_SERVER, - NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, @@ -348,9 +348,6 @@ public final class ClusterSettings extends AbstractScopedSettings { Node.NODE_INGEST_SETTING, Node.NODE_ATTRIBUTES, Node.NODE_LOCAL_STORAGE_SETTING, - URLRepository.ALLOWED_URLS_SETTING, - URLRepository.REPOSITORIES_URL_SETTING, - URLRepository.SUPPORTED_PROTOCOLS_SETTING, TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX, diff --git a/core/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java new file mode 100644 index 00000000000..08860cb5ea9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/CreateKeyStoreCommand.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.nio.file.Files; +import java.nio.file.Path; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli to create a new keystore. + */ +class CreateKeyStoreCommand extends EnvironmentAwareCommand { + + CreateKeyStoreCommand() { + super("Creates a new elasticsearch keystore"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile()); + if (Files.exists(keystoreFile)) { + if (terminal.promptYesNo("An elasticsearch keystore already exists. Overwrite?", false) == false) { + terminal.println("Exiting without creating keystore."); + return; + } + } + + + char[] password = new char[0];// terminal.readSecret("Enter passphrase (empty for no passphrase): "); + /* TODO: uncomment when entering passwords on startup is supported + char[] passwordRepeat = terminal.readSecret("Enter same passphrase again: "); + if (Arrays.equals(password, passwordRepeat) == false) { + throw new UserException(ExitCodes.DATA_ERROR, "Passphrases are not equal, exiting."); + }*/ + + KeyStoreWrapper keystore = KeyStoreWrapper.create(password); + keystore.save(env.configFile()); + terminal.println("Created elasticsearch keystore in " + env.configFile()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 959fe1849ea..1ea02252641 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.seqno.LocalCheckpointService; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.Store; @@ -69,6 +69,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING, IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING, + IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING, IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING, IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING, IndexMetaData.INDEX_READ_ONLY_SETTING, @@ -107,13 +108,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_REFRESH_INTERVAL_SETTING, 
IndexSettings.MAX_RESULT_WINDOW_SETTING, IndexSettings.MAX_RESCORE_WINDOW_SETTING, + IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, IndexSettings.DEFAULT_FIELD_SETTING, IndexSettings.QUERY_STRING_LENIENT_SETTING, IndexSettings.ALLOW_UNMAPPED, IndexSettings.INDEX_CHECK_ON_STARTUP, IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL, - LocalCheckpointService.SETTINGS_BIT_ARRAYS_SIZE, + LocalCheckpointTracker.SETTINGS_BIT_ARRAYS_SIZE, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, IndexSettings.MAX_SLICES_PER_SCROLL, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java similarity index 52% rename from core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java rename to core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index 3f3f805d8f7..5bded392fdb 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java @@ -16,25 +16,26 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.search.suggest; -import java.util.Map; +package org.elasticsearch.common.settings; + +import org.elasticsearch.cli.MultiCommand; +import org.elasticsearch.cli.Terminal; /** - * Registry of Suggesters. This is only its own class to make Guice happy. + * A cli tool for managing secrets in the elasticsearch keystore. */ -public final class Suggesters { - private final Map> suggesters; +public class KeyStoreCli extends MultiCommand { - public Suggesters(Map> suggesters) { - this.suggesters = suggesters; + private KeyStoreCli() { + super("A tool for managing settings stored in the elasticsearch keystore"); + subcommands.put("create", new CreateKeyStoreCommand()); + subcommands.put("list", new ListKeyStoreCommand()); + subcommands.put("add", new AddStringKeyStoreCommand()); + subcommands.put("remove", new RemoveSettingKeyStoreCommand()); } - public Suggester getSuggester(String suggesterName) { - Suggester suggester = suggesters.get(suggesterName); - if (suggester == null) { - throw new IllegalArgumentException("suggester with name [" + suggesterName + "] not supported"); - } - return suggester; + public static void main(String[] args) throws Exception { + exit(new KeyStoreCli().main(args, Terminal.DEFAULT)); } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java new file mode 100644 index 00000000000..f77b63d88fa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -0,0 +1,284 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import javax.crypto.SecretKey;
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.PBEKeySpec;
+import javax.security.auth.DestroyFailedException;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.CharBuffer;
+import java.nio.charset.CharsetEncoder;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.attribute.PosixFileAttributeView;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.BufferedChecksumIndexInput;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.ElasticsearchException;
+
+/**
+ * A wrapper around a Java KeyStore which supplements the keystore with extra metadata.
+ *
+ * Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call
+ * {@link #decrypt(char[])} with the keystore password, or an empty char array if
+ * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen
+ * in a single thread. Once decrypted, keys may be read with the wrapper in
+ * multiple threads.
+ */
+public class KeyStoreWrapper implements SecureSettings {
+
+    /** The name of the keystore file to read and write. */
+    private static final String KEYSTORE_FILENAME = "elasticsearch.keystore";
+
+    /** The version of the metadata written before the keystore data. */
+    private static final int FORMAT_VERSION = 1;
+
+    /** The keystore type for a newly created keystore. */
+    private static final String NEW_KEYSTORE_TYPE = "PKCS12";
+
+    /** The algorithm used to store the password for a newly created keystore. */
+    private static final String NEW_KEYSTORE_SECRET_KEY_ALGO = "PBE";//"PBEWithHmacSHA256AndAES_128";
+
+    /** An encoder to check whether string values are ascii. */
+    private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder();
+
+    /** True iff the keystore has a password needed to read it. */
+    private final boolean hasPassword;
+
+    /** The type of the keystore, as passed to {@link java.security.KeyStore#getInstance(String)} */
+    private final String type;
+
+    /** A factory necessary for constructing instances of secrets in a {@link KeyStore}. */
+    private final SecretKeyFactory secretFactory;
+
+    /** The raw bytes of the encrypted keystore. */
+    private final byte[] keystoreBytes;
+
+    /** The loaded keystore. See {@link #decrypt(char[])}. */
+    private final SetOnce keystore = new SetOnce<>();
+
+    /** The password for the keystore. See {@link #decrypt(char[])}. */
+    private final SetOnce keystorePassword = new SetOnce<>();
+
+    /** The setting names contained in the loaded keystore.
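+     * Populated from the keystore aliases in {@link #decrypt(char[])}.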
*/ + private final Set settingNames = new HashSet<>(); + + private KeyStoreWrapper(boolean hasPassword, String type, String secretKeyAlgo, byte[] keystoreBytes) { + this.hasPassword = hasPassword; + this.type = type; + try { + secretFactory = SecretKeyFactory.getInstance(secretKeyAlgo); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + this.keystoreBytes = keystoreBytes; + } + + /** Returns a path representing the ES keystore in the given config dir. */ + static Path keystorePath(Path configDir) { + return configDir.resolve(KEYSTORE_FILENAME); + } + + /** Constructs a new keystore with the given password. */ + static KeyStoreWrapper create(char[] password) throws Exception { + KeyStoreWrapper wrapper = new KeyStoreWrapper(password.length != 0, NEW_KEYSTORE_TYPE, NEW_KEYSTORE_SECRET_KEY_ALGO, null); + KeyStore keyStore = KeyStore.getInstance(NEW_KEYSTORE_TYPE); + keyStore.load(null, null); + wrapper.keystore.set(keyStore); + wrapper.keystorePassword.set(new KeyStore.PasswordProtection(password)); + return wrapper; + } + + /** + * Loads information about the Elasticsearch keystore from the provided config directory. + * + * {@link #decrypt(char[])} must be called before reading or writing any entries. + * Returns {@code null} if no keystore exists. + */ + public static KeyStoreWrapper load(Path configDir) throws IOException { + Path keystoreFile = keystorePath(configDir); + if (Files.exists(keystoreFile) == false) { + return null; + } + + SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) { + ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput); + CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION); + byte hasPasswordByte = input.readByte(); + boolean hasPassword = hasPasswordByte == 1; + if (hasPassword == false && hasPasswordByte != 0) { + throw new IllegalStateException("hasPassword boolean is corrupt: " + + String.format(Locale.ROOT, "%02x", hasPasswordByte)); + } + String type = input.readString(); + String secretKeyAlgo = input.readString(); + byte[] keystoreBytes = new byte[input.readInt()]; + input.readBytes(keystoreBytes, 0, keystoreBytes.length); + CodecUtil.checkFooter(input); + return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes); + } + } + + @Override + public boolean isLoaded() { + return keystore.get() != null; + } + + /** Return true iff calling {@link #decrypt(char[])} requires a non-empty password. */ + public boolean hasPassword() { + return hasPassword; + } + + /** + * Decrypts the underlying java keystore. + * + * This may only be called once. The provided password will be zeroed out. + */ + public void decrypt(char[] password) throws GeneralSecurityException, IOException { + if (keystore.get() != null) { + throw new IllegalStateException("Keystore has already been decrypted"); + } + keystore.set(KeyStore.getInstance(type)); + try (InputStream in = new ByteArrayInputStream(keystoreBytes)) { + keystore.get().load(in, password); + } finally { + Arrays.fill(keystoreBytes, (byte)0); + } + + keystorePassword.set(new KeyStore.PasswordProtection(password)); + Arrays.fill(password, '\0'); + + // convert keystore aliases enum into a set for easy lookup + Enumeration aliases = keystore.get().aliases(); + while (aliases.hasMoreElements()) { + settingNames.add(aliases.nextElement()); + } + } + + /** Write the keystore to the given config directory. 
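+     * The contents are written to a temporary file first and then atomically moved into
+     * place, so an interrupted write cannot clobber an existing keystore.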
*/ + void save(Path configDir) throws Exception { + char[] password = this.keystorePassword.get().getPassword(); + + SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + // write to tmp file first, then overwrite + String tmpFile = KEYSTORE_FILENAME + ".tmp"; + try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) { + CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION); + output.writeByte(password.length == 0 ? (byte)0 : (byte)1); + output.writeString(type); + output.writeString(secretFactory.getAlgorithm()); + + ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); + keystore.get().store(keystoreBytesStream, password); + byte[] keystoreBytes = keystoreBytesStream.toByteArray(); + output.writeInt(keystoreBytes.length); + output.writeBytes(keystoreBytes, keystoreBytes.length); + CodecUtil.writeFooter(output); + } + + Path keystoreFile = keystorePath(configDir); + Files.move(configDir.resolve(tmpFile), keystoreFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + PosixFileAttributeView attrs = Files.getFileAttributeView(keystoreFile, PosixFileAttributeView.class); + if (attrs != null) { + // don't rely on umask: ensure the keystore has minimal permissions + attrs.setPermissions(PosixFilePermissions.fromString("rw-------")); + } + } + + public Set getSettings() { + return settingNames; + } + + @Override + public boolean hasSetting(String setting) { + return settingNames.contains(setting); + } + + // TODO: make settings accessible only to code that registered the setting + /** Retrieve a string setting. The {@link SecureString} should be closed once it is used. */ + @Override + public SecureString getString(String setting) throws GeneralSecurityException { + KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get()); + if (entry instanceof KeyStore.SecretKeyEntry == false) { + throw new IllegalStateException("Secret setting " + setting + " is not a string"); + } + // TODO: only allow getting a setting once? + KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry; + PBEKeySpec keySpec = (PBEKeySpec) secretFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); + SecureString value = new SecureString(keySpec.getPassword()); + keySpec.clearPassword(); + return value; + } + + /** + * Set a string setting. + * + * @throws IllegalArgumentException if the value is not ASCII + */ + void setString(String setting, char[] value) throws GeneralSecurityException { + if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) { + throw new IllegalArgumentException("Value must be ascii"); + } + SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec(value)); + keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get()); + settingNames.add(setting); + } + + /** Remove the given setting from the keystore. 
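+     * The removal is in-memory only; callers must also call {@link #save(Path)} to persist it,
+     * as the keystore cli's remove command does.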
*/ + void remove(String setting) throws KeyStoreException { + keystore.get().deleteEntry(setting); + settingNames.remove(setting); + } + + @Override + public void close() throws IOException { + try { + if (keystorePassword.get() != null) { + keystorePassword.get().destroy(); + } + } catch (DestroyFailedException e) { + throw new IOException(e); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ListKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/ListKeyStoreCommand.java new file mode 100644 index 00000000000..b0484fb1526 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/ListKeyStoreCommand.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli to list all settings in the keystore. + */ +class ListKeyStoreCommand extends EnvironmentAwareCommand { + + ListKeyStoreCommand() { + super("List entries in the keystore"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + if (keystore == null) { + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one."); + } + + keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */); + + List sortedEntries = new ArrayList<>(keystore.getSettings()); + Collections.sort(sortedEntries); + for (String entry : sortedEntries) { + terminal.println(entry); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommand.java new file mode 100644 index 00000000000..e9089b85b56 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommand.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.util.List; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli to remove a setting. + */ +class RemoveSettingKeyStoreCommand extends EnvironmentAwareCommand { + + private final OptionSpec arguments; + + RemoveSettingKeyStoreCommand() { + super("Remove a setting from the keystore"); + arguments = parser.nonOptions("setting names"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + List settings = arguments.values(options); + if (settings.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "Must supply at least one setting to remove"); + } + + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + if (keystore == null) { + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one."); + } + + keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */); + + for (String setting : arguments.values(options)) { + if (keystore.getSettings().contains(setting) == false) { + throw new UserException(ExitCodes.CONFIG, "Setting [" + setting + "] does not exist in the keystore."); + } + keystore.remove(setting); + } + keystore.save(env.configFile()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java new file mode 100644 index 00000000000..c06dad2a2ea --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.security.GeneralSecurityException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.ArrayUtils; + + +/** + * A secure setting. + * + * This class allows access to settings from the Elasticsearch keystore. 
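+ *
+ * A sketch of typical usage (the setting name here is made up for illustration):
+ * <pre>{@code
+ * static final Setting<SecureString> PASSWORD =
+ *     SecureSetting.secureString("my.plugin.password", null, false);
+ *
+ * SecureString value = PASSWORD.get(settings); // reads from the keystore, not elasticsearch.yml
+ * }</pre>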
+ */ +public abstract class SecureSetting extends Setting { + private static final Set ALLOWED_PROPERTIES = new HashSet<>( + Arrays.asList(Property.Deprecated, Property.Shared) + ); + + private static final Property[] FIXED_PROPERTIES = { + Property.NodeScope + }; + + private static final Property[] LEGACY_PROPERTIES = { + Property.NodeScope, Property.Deprecated, Property.Filtered + }; + + private SecureSetting(String key, Property... properties) { + super(key, (String)null, null, ArrayUtils.concat(properties, FIXED_PROPERTIES, Property.class)); + assert assertAllowedProperties(properties); + } + + private boolean assertAllowedProperties(Setting.Property... properties) { + for (Setting.Property property : properties) { + if (ALLOWED_PROPERTIES.contains(property) == false) { + return false; + } + } + return true; + } + + @Override + public String getDefaultRaw(Settings settings) { + throw new UnsupportedOperationException("secure settings are not strings"); + } + + @Override + public T getDefault(Settings settings) { + throw new UnsupportedOperationException("secure settings are not strings"); + } + + @Override + public String getRaw(Settings settings) { + throw new UnsupportedOperationException("secure settings are not strings"); + } + + @Override + public boolean exists(Settings settings) { + final SecureSettings secureSettings = settings.getSecureSettings(); + return secureSettings != null && secureSettings.hasSetting(getKey()); + } + + @Override + public T get(Settings settings) { + checkDeprecation(settings); + final SecureSettings secureSettings = settings.getSecureSettings(); + if (secureSettings == null || secureSettings.hasSetting(getKey()) == false) { + return getFallback(settings); + } + try { + return getSecret(secureSettings); + } catch (GeneralSecurityException e) { + throw new RuntimeException("failed to read secure setting " + getKey(), e); + } + } + + /** Returns the secret setting from the keyStoreReader store. */ + abstract T getSecret(SecureSettings secureSettings) throws GeneralSecurityException; + + /** Returns the value from a fallback setting. Returns null if no fallback exists. */ + abstract T getFallback(Settings settings); + + // TODO: override toXContent + + /** + * A setting which contains a sensitive string. + * + * This may be any sensitive string, e.g. a username, a password, an auth token, etc. + */ + public static Setting secureString(String name, Setting fallback, + boolean allowLegacy, Property... 
properties) { + final Setting legacy; + if (allowLegacy) { + Property[] legacyProperties = ArrayUtils.concat(properties, LEGACY_PROPERTIES, Property.class); + legacy = Setting.simpleString(name, legacyProperties); + } else { + legacy = null; + } + return new SecureSetting(name, properties) { + @Override + protected SecureString getSecret(SecureSettings secureSettings) throws GeneralSecurityException { + return secureSettings.getString(getKey()); + } + @Override + SecureString getFallback(Settings settings) { + if (legacy != null && legacy.exists(settings)) { + return new SecureString(legacy.get(settings).toCharArray()); + } + if (fallback != null) { + return fallback.get(settings); + } + return new SecureString(new char[0]); // this means "setting does not exist" + } + @Override + protected void checkDeprecation(Settings settings) { + super.checkDeprecation(settings); + if (legacy != null) { + legacy.checkDeprecation(settings); + } + } + @Override + public boolean exists(Settings settings) { + // handle legacy, which is internal to this setting + return super.exists(settings) || legacy != null && legacy.exists(settings); + } + }; + } + + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java new file mode 100644 index 00000000000..d810e1ec4da --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.Closeable; +import java.security.GeneralSecurityException; + +/** + * An accessor for settings which are securely stored. See {@link SecureSetting}. + */ +public interface SecureSettings extends Closeable { + + /** Returns true iff the settings are loaded and retrievable. */ + boolean isLoaded(); + + /** Returns true iff the given setting exists in this secure settings. */ + boolean hasSetting(String setting); + + /** Return a string setting. The {@link SecureString} should be closed once it is used. */ + SecureString getString(String setting) throws GeneralSecurityException; +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureString.java b/core/src/main/java/org/elasticsearch/common/settings/SecureString.java new file mode 100644 index 00000000000..36982ddde1c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureString.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
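The `secureString` factory above ties together three sources for one value: the keystore entry, an optional fallback setting, and an optional legacy plain-text setting. A minimal usage sketch follows; the setting key and the `useKeyPassphrase` consumer are hypothetical, and `settings` is assumed to be a `Settings` instance whose `SecureSettings` have already been loaded:

---------------------------------------------------------------------------
// Hedged sketch; the key name is made up for illustration.
Setting<SecureString> passphrase =
        SecureSetting.secureString("example.ssl.key_passphrase", null, false);

// get() reads from the keystore, falling back to an empty SecureString if the
// entry is absent; try-with-resources ensures the backing char[] is zeroed.
try (SecureString value = passphrase.get(settings)) {
    useKeyPassphrase(value); // hypothetical consumer
}
---------------------------------------------------------------------------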
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.Closeable; +import java.util.Arrays; +import java.util.Objects; + +/** + * A String implementations which allows clearing the underlying char array. + */ +public final class SecureString implements CharSequence, Closeable { + + private char[] chars; + + /** + * Constructs a new SecureString which controls the passed in char array. + * + * Note: When this instance is closed, the array will be zeroed out. + */ + public SecureString(char[] chars) { + this.chars = Objects.requireNonNull(chars); + } + + /** + * Constructs a new SecureString from an existing String. + * + * NOTE: This is not actually secure, since the provided String cannot be deallocated, but + * this constructor allows for easy compatibility between new and old apis. + * + * @deprecated Only use for compatibility between deprecated string settings and new secure strings + */ + @Deprecated + public SecureString(String s) { + this(s.toCharArray()); + } + + /** Constant time equality to avoid potential timing attacks. */ + @Override + public synchronized boolean equals(Object o) { + ensureNotClosed(); + if (this == o) return true; + if (o == null || o instanceof CharSequence == false) return false; + CharSequence that = (CharSequence) o; + if (chars.length != that.length()) { + return false; + } + + int equals = 0; + for (int i = 0; i < chars.length; i++) { + equals |= chars[i] ^ that.charAt(i); + } + + return equals == 0; + } + + @Override + public synchronized int hashCode() { + return Arrays.hashCode(chars); + } + + @Override + public synchronized int length() { + ensureNotClosed(); + return chars.length; + } + + @Override + public synchronized char charAt(int index) { + ensureNotClosed(); + return chars[index]; + } + + @Override + public SecureString subSequence(int start, int end) { + throw new UnsupportedOperationException("Cannot get subsequence of SecureString"); + } + + /** + * Convert to a {@link String}. This should only be used with APIs that do not take {@link CharSequence}. + */ + @Override + public synchronized String toString() { + return new String(chars); + } + + /** + * Closes the string by clearing the underlying char array. + */ + @Override + public synchronized void close() { + Arrays.fill(chars, '\0'); + chars = null; + } + + /** Throw an exception if this string has been closed, indicating something is trying to access the data after being closed. 
*/ + private void ensureNotClosed() { + if (chars == null) { + throw new IllegalStateException("SecureString has already been closed"); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 22c74afee7c..f85894790a8 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -41,14 +42,18 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; +import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A setting. Encapsulates typical stuff like default value, parsing, and scope. @@ -118,7 +123,8 @@ public class Setting extends ToXContentToBytes { private Setting(Key key, @Nullable Setting fallbackSetting, Function defaultValue, Function parser, Property... properties) { - assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; + assert this instanceof SecureSetting || this.isGroupSetting() || parser.apply(defaultValue.apply(Settings.EMPTY)) != null + : "parser returned null"; this.key = key; this.fallbackSetting = fallbackSetting; this.defaultValue = defaultValue; @@ -273,7 +279,7 @@ public class Setting extends ToXContentToBytes { * Returns the default value string representation for this setting. * @param settings a settings object for settings that has a default value depending on another setting if available */ - public final String getDefaultRaw(Settings settings) { + public String getDefaultRaw(Settings settings) { return defaultValue.apply(settings); } @@ -281,7 +287,7 @@ public class Setting extends ToXContentToBytes { * Returns the default value for this setting. * @param settings a settings object for settings that has a default value depending on another setting if available */ - public final T getDefault(Settings settings) { + public T getDefault(Settings settings) { return parser.apply(getDefaultRaw(settings)); } @@ -289,7 +295,7 @@ public class Setting extends ToXContentToBytes { * Returns true iff this setting is present in the given settings object. Otherwise false */ public boolean exists(Settings settings) { - return settings.get(getKey()) != null; + return settings.getAsMap().containsKey(getKey()); } /** @@ -329,14 +335,19 @@ public class Setting extends ToXContentToBytes { * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ public String getRaw(Settings settings) { + checkDeprecation(settings); + return settings.get(getKey(), defaultValue.apply(settings)); + } + + /** Logs a deprecation warning if the setting is deprecated and used. 
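To make the `SecureString` contract above concrete, a short sketch: equality is constant time to resist timing attacks, `close()` zeroes the caller-supplied array in place, and any access after closing fails fast:

---------------------------------------------------------------------------
char[] chars = {'s', 'e', 'c', 'r', 'e', 't'};
SecureString secret = new SecureString(chars);

boolean same = secret.equals(new SecureString("secret".toCharArray())); // true, constant time

secret.close();            // zeroes the wrapped array and drops the reference
assert chars[0] == '\0';   // the caller-supplied array was cleared in place
// any further access, e.g. secret.length(), now throws IllegalStateException
---------------------------------------------------------------------------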
*/ + protected void checkDeprecation(Settings settings) { // They're using the setting, so we need to tell them to stop if (this.isDeprecated() && this.exists(settings)) { // It would be convenient to show its replacement key, but replacement is often not so simple final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass())); deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " + - "See the breaking changes lists in the documentation for details", getKey()); + "See the breaking changes lists in the documentation for details", getKey()); } - return settings.get(getKey(), defaultValue.apply(settings)); } /** @@ -404,8 +415,8 @@ public class Setting extends ToXContentToBytes { } /** - * Updates settings that depend on eachother. See {@link AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} - * and its usage for details. + * Updates settings that depend on each other. + * See {@link AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its usage for details. */ static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, final Setting aSetting, final Setting bSetting, Logger logger) { @@ -440,13 +451,119 @@ public class Setting extends ToXContentToBytes { }; } + public static class AffixSetting extends Setting { + private final AffixKey key; + private final Function> delegateFactory; + + public AffixSetting(AffixKey key, Setting delegate, Function> delegateFactory) { + super(key, delegate.defaultValue, delegate.parser, delegate.properties.toArray(new Property[0])); + this.key = key; + this.delegateFactory = delegateFactory; + } + + boolean isGroupSetting() { + return true; + } + + private Stream matchStream(Settings settings) { + return settings.getAsMap().keySet().stream().filter((key) -> match(key)).map(settingKey -> key.getConcreteString(settingKey)); + } + + AbstractScopedSettings.SettingUpdater, T>> newAffixUpdater( + BiConsumer consumer, Logger logger, BiConsumer validator) { + return new AbstractScopedSettings.SettingUpdater, T>>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + return Stream.concat(matchStream(current), matchStream(previous)).findAny().isPresent(); + } + + @Override + public Map, T> getValue(Settings current, Settings previous) { + // we collect all concrete keys and then delegate to the actual setting for validation and settings extraction + final Map, T> result = new IdentityHashMap<>(); + Stream.concat(matchStream(current), matchStream(previous)).distinct().forEach(aKey -> { + String namespace = key.getNamespace(aKey); + AbstractScopedSettings.SettingUpdater updater = + getConcreteSetting(aKey).newUpdater((v) -> consumer.accept(namespace, v), logger, + (v) -> validator.accept(namespace, v)); + if (updater.hasChanged(current, previous)) { + // only the ones that have changed otherwise we might get too many updates + // the hasChanged above checks only if there are any changes + T value = updater.getValue(current, previous); + result.put(updater, value); + } + }); + return result; + } + + @Override + public void apply(Map, T> value, Settings current, Settings previous) { + for (Map.Entry, T> entry : value.entrySet()) { + entry.getKey().apply(entry.getValue(), current, previous); + } + } + }; + } + + @Override + public T get(Settings settings) { + throw new UnsupportedOperationException("affix settings can't return values" + + " use 
#getConcreteSetting to obtain a concrete setting"); + } + + @Override + public String getRaw(Settings settings) { + throw new UnsupportedOperationException("affix settings can't return values" + + " use #getConcreteSetting to obtain a concrete setting"); + } + + @Override + public Setting getConcreteSetting(String key) { + if (match(key)) { + return delegateFactory.apply(key); + } else { + throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); + } + } + + /** + * Get a setting with the given namespace filled in for prefix and suffix. + */ + public Setting getConcreteSettingForNamespace(String namespace) { + String fullKey = key.toConcreteKey(namespace).toString(); + return getConcreteSetting(fullKey); + } + + @Override + public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { + matchStream(defaultSettings).forEach((key) -> getConcreteSetting(key).diff(builder, source, defaultSettings)); + } + + /** + * Returns the namespace for a concrete settting. Ie. an affix setting with prefix: search. and suffix: username + * will return remote as a namespace for the setting search.remote.username + */ + public String getNamespace(Setting concreteSetting) { + return key.getNamespace(concreteSetting.getKey()); + } + + /** + * Returns a stream of all concrete setting instances for the given settings. AffixSetting is only a specification, concrete + * settings depend on an actual set of setting keys. + */ + public Stream> getAllConcreteSettings(Settings settings) { + return matchStream(settings).distinct().map(this::getConcreteSetting); + } + } + private final class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; private final Logger logger; private final Consumer accept; - public Updater(Consumer consumer, Logger logger, Consumer accept) { + Updater(Consumer consumer, Logger logger, Consumer accept) { this.consumer = consumer; this.logger = logger; this.accept = accept; @@ -559,15 +676,15 @@ public class Setting extends ToXContentToBytes { } public static Setting boolSetting(String key, boolean defaultValue, Property... properties) { - return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties); + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBoolean, properties); } public static Setting boolSetting(String key, Setting fallbackSetting, Property... properties) { - return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties); + return new Setting<>(key, fallbackSetting, Booleans::parseBoolean, properties); } public static Setting boolSetting(String key, Function defaultValueFn, Property... properties) { - return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties); + return new Setting<>(key, defaultValueFn, Booleans::parseBoolean, properties); } public static Setting byteSizeSetting(String key, ByteSizeValue value, Property... 
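A sketch of the intended `AffixSetting` usage, following the `search.remote.*.username` shape from the `getNamespace` javadoc; the namespace `cluster_one` is illustrative and `settings` is assumed in scope:

---------------------------------------------------------------------------
Setting.AffixSetting<String> username = Setting.affixKeySetting(
        "search.remote.", "username",
        key -> Setting.simpleString(key, Setting.Property.NodeScope));

Setting<String> concrete = username.getConcreteSettingForNamespace("cluster_one");
// concrete.getKey()               -> "search.remote.cluster_one.username"
// username.getNamespace(concrete) -> "cluster_one"
String value = concrete.get(settings);
---------------------------------------------------------------------------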
properties) { @@ -702,7 +819,8 @@ public class Setting extends ToXContentToBytes { } private static List parseableStringToList(String parsableString) { - try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(parsableString)) { + // EMPTY is safe here because we never call namedObject + try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, parsableString)) { XContentParser.Token token = xContentParser.nextToken(); if (token != XContentParser.Token.START_ARRAY) { throw new IllegalArgumentException("expected START_ARRAY but got " + token); @@ -720,7 +838,6 @@ public class Setting extends ToXContentToBytes { } } - private static String arrayToParsableString(String[] array) { try { XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -734,9 +851,11 @@ public class Setting extends ToXContentToBytes { throw new ElasticsearchException(ex); } } + public static Setting groupSetting(String key, Property... properties) { return groupSetting(key, (s) -> {}, properties); } + public static Setting groupSetting(String key, Consumer validator, Property... properties) { return new Setting(new GroupKey(key), (s) -> "", (s) -> null, properties) { @Override @@ -817,7 +936,9 @@ public class Setting extends ToXContentToBytes { @Override public void apply(Settings value, Settings current, Settings previous) { - logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + if (logger.isInfoEnabled()) { // getRaw can create quite some objects + logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + } consumer.accept(value); } @@ -885,59 +1006,24 @@ public class Setting extends ToXContentToBytes { * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless * {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, - Property... properties) { - return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties); + public static AffixSetting prefixKeySetting(String prefix, Function> delegateFactory) { + return affixKeySetting(new AffixKey(prefix), delegateFactory); } /** * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance - * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters + * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting affixKeySetting(String prefix, String suffix, Function defaultValue, - Function parser, Property... properties) { - return affixKeySetting(AffixKey.withAffix(prefix, suffix), defaultValue, parser, properties); + public static AffixSetting affixKeySetting(String prefix, String suffix, Function> delegateFactory) { + return affixKeySetting(new AffixKey(prefix, suffix), delegateFactory); } - public static Setting affixKeySetting(String prefix, String suffix, String defaultValue, Function parser, - Property... 
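The `parseableStringToList`/`arrayToParsableString` helpers above are what let list settings survive the flat key/value representation: each list is stored as a compact JSON array string. A sketch under that assumption, with an illustrative key name:

---------------------------------------------------------------------------
// Hedged sketch; "example.hosts" is not a real setting.
Setting<List<String>> hosts = Setting.listSetting(
        "example.hosts", Collections.emptyList(), Function.identity(),
        Setting.Property.NodeScope);

Settings s = Settings.builder()
        .putArray("example.hosts", "host-a", "host-b") // stored internally as a JSON array
        .build();
List<String> parsed = hosts.get(s); // ["host-a", "host-b"]
---------------------------------------------------------------------------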
properties) { - return affixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); - } + private static AffixSetting affixKeySetting(AffixKey key, Function> delegateFactory) { + Setting delegate = delegateFactory.apply("_na_"); + return new AffixSetting<>(key, delegate, delegateFactory); + }; - public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, - Property... properties) { - return new Setting(key, defaultValue, parser, properties) { - - @Override - boolean isGroupSetting() { - return true; - } - - @Override - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger logger, Consumer validator) { - throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating."); - } - - @Override - public Setting getConcreteSetting(String key) { - if (match(key)) { - return new Setting<>(key, defaultValue, parser, properties); - } else { - throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); - } - } - - @Override - public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { - for (Map.Entry entry : defaultSettings.getAsMap().entrySet()) { - if (match(entry.getKey())) { - getConcreteSetting(entry.getKey()).diff(builder, source, defaultSettings); - } - } - } - }; - } public interface Key { @@ -1003,37 +1089,60 @@ public class Setting extends ToXContentToBytes { } } + /** + * A key that allows for static pre and suffix. This is used for settings + * that have dynamic namespaces like for different accounts etc. + */ public static final class AffixKey implements Key { - public static AffixKey withPrefix(String prefix) { - return new AffixKey(prefix, null); - } - - public static AffixKey withAffix(String prefix, String suffix) { - return new AffixKey(prefix, suffix); - } - + private final Pattern pattern; private final String prefix; private final String suffix; - public AffixKey(String prefix, String suffix) { + AffixKey(String prefix) { + this(prefix, null); + } + + AffixKey(String prefix, String suffix) { assert prefix != null || suffix != null: "Either prefix or suffix must be non-null"; + this.prefix = prefix; if (prefix.endsWith(".") == false) { throw new IllegalArgumentException("prefix must end with a '.'"); } this.suffix = suffix; + if (suffix == null) { + pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))"); + } else { + // the last part of this regexp is for lists since they are represented as x.${namespace}.y.1, x.${namespace}.y.2 + pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." 
+ Pattern.quote(suffix) + ")(?:\\.\\d+)?"); + } } @Override public boolean match(String key) { - boolean match = true; - if (prefix != null) { - match = key.startsWith(prefix); + return pattern.matcher(key).matches(); + } + + /** + * Returns a string representation of the concrete setting key + */ + String getConcreteString(String key) { + Matcher matcher = pattern.matcher(key); + if (matcher.matches() == false) { + throw new IllegalStateException("can't get concrete string for key " + key + " key doesn't match"); } - if (suffix != null) { - match = match && key.endsWith(suffix); + return matcher.group(1); + } + + /** + * Returns a string representation of the concrete setting key + */ + String getNamespace(String key) { + Matcher matcher = pattern.matcher(key); + if (matcher.matches() == false) { + throw new IllegalStateException("can't get concrete string for key " + key + " key doesn't match"); } - return match; + return matcher.group(2); } public SimpleKey toConcreteKey(String missingPart) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 819edc246ac..579be7ce31f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -19,12 +19,14 @@ package org.elasticsearch.common.settings; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.settings.loader.SettingsLoaderFactory; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -35,6 +37,7 @@ import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -42,6 +45,9 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.util.AbstractMap; +import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -52,16 +58,15 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; -import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.elasticsearch.common.unit.SizeValue.parseSizeValue; @@ -75,11 +80,24 @@ public final class Settings implements ToXContent { public static final Settings EMPTY = new Builder().build(); private static final Pattern ARRAY_PATTERN = Pattern.compile("(.*)\\.\\d+$"); - private SortedMap settings; + /** The raw settings from the full key 
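The compiled `AffixKey` pattern above drives both `match` and namespace extraction, with the trailing `(?:\.\d+)?` group admitting list entries of the form `x.<namespace>.y.0`. A sketch assuming same-package access (the constructor and accessors are package-private):

---------------------------------------------------------------------------
Setting.AffixKey key = new Setting.AffixKey("search.remote.", "username");

key.match("search.remote.cluster_one.username");        // true
key.getNamespace("search.remote.cluster_one.username"); // "cluster_one"
key.match("search.remote.username");                    // false: no namespace segment
---------------------------------------------------------------------------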
to raw string value. */ + private Map settings; - Settings(Map settings) { + /** The secure settings storage associated with these settings. */ + private SecureSettings secureSettings; + + Settings(Map settings, SecureSettings secureSettings) { // we use a sorted map for consistent serialization when using getAsMap() this.settings = Collections.unmodifiableSortedMap(new TreeMap<>(settings)); + this.secureSettings = secureSettings; + } + + /** + * Retrieve the secure settings in these settings. + */ + SecureSettings getSecureSettings() { + // pkg private so it can only be accessed by local subclasses of SecureSetting + return secureSettings; } /** @@ -87,7 +105,8 @@ public final class Settings implements ToXContent { * @return an unmodifiable map of settings */ public Map getAsMap() { - return Collections.unmodifiableMap(this.settings); + // settings is always unmodifiable + return this.settings; } /** @@ -186,30 +205,16 @@ public final class Settings implements ToXContent { * A settings that are filtered (and key is removed) with the specified prefix. */ public Settings getByPrefix(String prefix) { - Builder builder = new Builder(); - for (Map.Entry entry : getAsMap().entrySet()) { - if (entry.getKey().startsWith(prefix)) { - if (entry.getKey().length() < prefix.length()) { - // ignore this. one - continue; - } - builder.put(entry.getKey().substring(prefix.length()), entry.getValue()); - } - } - return builder.build(); + return new Settings(new FilteredMap(this.settings, (k) -> k.startsWith(prefix), prefix), + secureSettings == null ? null : new PrefixedSecureSettings(secureSettings, prefix)); } /** * Returns a new settings object that contains all setting of the current one filtered by the given settings key predicate. + * Secure settings may not be accessed through a filter. */ public Settings filter(Predicate predicate) { - Builder builder = new Builder(); - for (Map.Entry entry : getAsMap().entrySet()) { - if (predicate.test(entry.getKey())) { - builder.put(entry.getKey(), entry.getValue()); - } - } - return builder.build(); + return new Settings(new FilteredMap(this.settings, predicate, null), null); } /** @@ -310,6 +315,36 @@ public final class Settings implements ToXContent { return Booleans.parseBoolean(get(setting), defaultValue); } + // TODO #22298: Delete this method and update call sites to #getAsBoolean(String, Boolean). + /** + * Returns the setting value (as boolean) associated with the setting key. If it does not exist, returns the default value provided. + * If the index was created on Elasticsearch below 6.0, booleans will be parsed leniently otherwise they are parsed strictly. + * + * See {@link Booleans#isBooleanLenient(char[], int, int)} for the definition of a "lenient boolean" + * and {@link Booleans#isBoolean(char[], int, int)} for the definition of a "strict boolean". + * + * @deprecated Only used to provide automatic upgrades for pre 6.0 indices. 
+ */ + @Deprecated + public Boolean getAsBooleanLenientForPreEs6Indices( + final Version indexVersion, + final String setting, + final Boolean defaultValue, + final DeprecationLogger deprecationLogger) { + if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) { + //Only emit a warning if the setting's value is not a proper boolean + final String value = get(setting, "false"); + if (Booleans.isBoolean(value) == false) { + @SuppressWarnings("deprecation") + boolean convertedValue = Booleans.parseBooleanLenient(get(setting), defaultValue); + deprecationLogger.deprecated("The value [{}] of setting [{}] is not coerced into boolean anymore. Please change " + + "this value to [{}].", value, setting, String.valueOf(convertedValue)); + return convertedValue; + } + } + return getAsBoolean(setting, defaultValue); + } + /** * Returns the setting value (as time) associated with the setting key. If it does not exists, * returns the default value provided. @@ -443,6 +478,7 @@ public final class Settings implements ToXContent { } return getGroupsInternal(settingPrefix, ignoreNonGrouped); } + private Map getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { // we don't really care that it might happen twice Map> map = new LinkedHashMap<>(); @@ -470,7 +506,7 @@ public final class Settings implements ToXContent { } Map retVal = new LinkedHashMap<>(); for (Map.Entry> entry : map.entrySet()) { - retVal.put(entry.getKey(), new Settings(Collections.unmodifiableMap(entry.getValue()))); + retVal.put(entry.getKey(), new Settings(Collections.unmodifiableMap(entry.getValue()), secureSettings)); } return Collections.unmodifiableMap(retVal); } @@ -553,7 +589,7 @@ public final class Settings implements ToXContent { } public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException { - out.writeVInt(settings.getAsMap().size()); + out.writeVInt(settings.size()); for (Map.Entry entry : settings.getAsMap().entrySet()) { out.writeString(entry.getKey()); out.writeOptionalString(entry.getValue()); @@ -590,7 +626,12 @@ public final class Settings implements ToXContent { * @return true if this settings object contains no settings */ public boolean isEmpty() { - return this.settings.isEmpty(); + return this.settings.isEmpty(); // TODO: account for secure settings + } + + /** Returns the number of settings in this settings object. */ + public int size() { + return this.settings.size(); // TODO: account for secure settings } /** @@ -602,7 +643,10 @@ public final class Settings implements ToXContent { public static final Settings EMPTY_SETTINGS = new Builder().build(); - private final Map map = new LinkedHashMap<>(); + // we use a sorted map for consistent serialization when using getAsMap() + private final Map map = new TreeMap<>(); + + private SetOnce secureSettings = new SetOnce<>(); private Builder() { @@ -626,6 +670,14 @@ public final class Settings implements ToXContent { return map.get(key); } + public Builder setSecureSettings(SecureSettings secureSettings) { + if (secureSettings.isLoaded() == false) { + throw new IllegalStateException("Secure settings must already be loaded"); + } + this.secureSettings.set(secureSettings); + return this; + } + /** * Puts tuples of key value pairs of settings. Simplified version instead of repeating calling * put for each one. 
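A sketch of the lenient-boolean upgrade path above; the setting key is illustrative, `deprecationLogger` is assumed in scope, and `Version.V_5_0_0` stands in for any pre-6.0 index version:

---------------------------------------------------------------------------
Settings s = Settings.builder().put("index.example.flag", "on").build();

Boolean coerced = s.getAsBooleanLenientForPreEs6Indices(
        Version.V_5_0_0, "index.example.flag", false, deprecationLogger);
// index created before 6.0: "on" is coerced to true and a deprecation warning
// asks the user to change the value to "true"; for 6.0+ indices the strict
// getAsBoolean parsing applies instead
---------------------------------------------------------------------------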
@@ -850,6 +902,9 @@ public final class Settings implements ToXContent { public Builder put(Settings settings) { removeNonArraysFieldsIfNewSettingsContainsFieldAsArray(settings.getAsMap()); map.putAll(settings.getAsMap()); + if (settings.getSecureSettings() != null) { + setSecureSettings(settings.getSecureSettings()); + } return this; } @@ -906,7 +961,9 @@ public final class Settings implements ToXContent { /** * Loads settings from the actual string content that represents them using the * {@link SettingsLoaderFactory#loaderFromSource(String)}. + * @deprecated use {@link #loadFromSource(String, XContentType)} to avoid content type detection */ + @Deprecated public Builder loadFromSource(String source) { SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromSource(source); try { @@ -918,9 +975,24 @@ public final class Settings implements ToXContent { return this; } + /** + * Loads settings from the actual string content that represents them using the + * {@link SettingsLoaderFactory#loaderFromXContentType(XContentType)} method to obtain a loader + */ + public Builder loadFromSource(String source, XContentType xContentType) { + SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromXContentType(xContentType); + try { + Map loadedSettings = settingsLoader.load(source); + put(loadedSettings); + } catch (Exception e) { + throw new SettingsException("Failed to load settings from [" + source + "]", e); + } + return this; + } + /** * Loads settings from a url that represents them using the - * {@link SettingsLoaderFactory#loaderFromSource(String)}. + * {@link SettingsLoaderFactory#loaderFromResource(String)}. */ public Builder loadFromPath(Path path) throws IOException { // NOTE: loadFromStream will close the input stream @@ -929,7 +1001,7 @@ public final class Settings implements ToXContent { /** * Loads settings from a stream that represents them using the - * {@link SettingsLoaderFactory#loaderFromSource(String)}. + * {@link SettingsLoaderFactory#loaderFromResource(String)}. */ public Builder loadFromStream(String resourceName, InputStream is) throws IOException { SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromResource(resourceName); @@ -1032,7 +1104,154 @@ public final class Settings implements ToXContent { * set on this builder. */ public Settings build() { - return new Settings(Collections.unmodifiableMap(map)); + return new Settings(map, secureSettings.get()); + } + } + + // TODO We could use an FST internally to make things even faster and more compact + private static final class FilteredMap extends AbstractMap { + private final Map delegate; + private final Predicate filter; + private final String prefix; + // we cache that size since we have to iterate the entire set + // this is safe to do since this map is only used with unmodifiable maps + private int size = -1; + @Override + public Set> entrySet() { + Set> delegateSet = delegate.entrySet(); + AbstractSet> filterSet = new AbstractSet>() { + + @Override + public Iterator> iterator() { + Iterator> iter = delegateSet.iterator(); + + return new Iterator>() { + private int numIterated; + private Entry currentElement; + @Override + public boolean hasNext() { + if (currentElement != null) { + return true; // protect against calling hasNext twice + } else { + if (numIterated == size) { // early terminate + assert size != -1 : "size was never set: " + numIterated + " vs. 
" + size; + return false; + } + while (iter.hasNext()) { + if (filter.test((currentElement = iter.next()).getKey())) { + numIterated++; + return true; + } + } + // we didn't find anything + currentElement = null; + return false; + } + } + + @Override + public Entry next() { + if (currentElement == null && hasNext() == false) { // protect against no #hasNext call or not respecting it + + throw new NoSuchElementException("make sure to call hasNext first"); + } + final Entry current = this.currentElement; + this.currentElement = null; + if (prefix == null) { + return current; + } + return new Entry() { + @Override + public String getKey() { + return current.getKey().substring(prefix.length()); + } + + @Override + public String getValue() { + return current.getValue(); + } + + @Override + public String setValue(String value) { + throw new UnsupportedOperationException(); + } + }; + } + }; + } + + @Override + public int size() { + return FilteredMap.this.size(); + } + }; + return filterSet; + } + + private FilteredMap(Map delegate, Predicate filter, String prefix) { + this.delegate = delegate; + this.filter = filter; + this.prefix = prefix; + } + + @Override + public String get(Object key) { + if (key instanceof String) { + final String theKey = prefix == null ? (String)key : prefix + key; + if (filter.test(theKey)) { + return delegate.get(theKey); + } + } + return null; + } + + @Override + public boolean containsKey(Object key) { + if (key instanceof String) { + final String theKey = prefix == null ? (String) key : prefix + key; + if (filter.test(theKey)) { + return delegate.containsKey(theKey); + } + } + return false; + } + + @Override + public int size() { + if (size == -1) { + size = Math.toIntExact(delegate.keySet().stream().filter((e) -> filter.test(e)).count()); + } + return size; + } + } + + private static class PrefixedSecureSettings implements SecureSettings { + private SecureSettings delegate; + private String prefix; + + PrefixedSecureSettings(SecureSettings delegate, String prefix) { + this.delegate = delegate; + this.prefix = prefix; + } + + @Override + public boolean isLoaded() { + return delegate.isLoaded(); + } + + @Override + public boolean hasSetting(String setting) { + return delegate.hasSetting(prefix + setting); + } + + @Override + public SecureString getString(String setting) throws GeneralSecurityException{ + return delegate.getString(prefix + setting); + } + + @Override + public void close() throws IOException { + delegate.close(); } } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 60276ce14f7..45a38ce34a5 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -54,6 +54,7 @@ public class SettingsModule implements Module { private final Logger logger; private final IndexScopedSettings indexScopedSettings; private final ClusterSettings clusterSettings; + private final SettingsFilter settingsFilter; public SettingsModule(Settings settings, Setting... 
additionalSettings) { this(settings, Arrays.asList(additionalSettings), Collections.emptyList()); @@ -137,12 +138,13 @@ public class SettingsModule implements Module { final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.negate(); clusterSettings.validate(settings.filter(acceptOnlyClusterSettings)); validateTribeSettings(settings, clusterSettings); + this.settingsFilter = new SettingsFilter(settings, settingsFilterPattern); } @Override public void configure(Binder binder) { binder.bind(Settings.class).toInstance(settings); - binder.bind(SettingsFilter.class).toInstance(new SettingsFilter(settings, settingsFilterPattern)); + binder.bind(SettingsFilter.class).toInstance(settingsFilter); binder.bind(ClusterSettings.class).toInstance(clusterSettings); binder.bind(IndexScopedSettings.class).toInstance(indexScopedSettings); } @@ -218,4 +220,8 @@ public class SettingsModule implements Module { public ClusterSettings getClusterSettings() { return clusterSettings; } + + public SettingsFilter getSettingsFilter() { + return settingsFilter; + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java index 5f2da22c5f2..5d8cb4918b2 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.settings.loader; +import org.elasticsearch.common.xcontent.XContentType; + /** * A class holding factory methods for settings loaders that attempts * to infer the type of the underlying settings content. @@ -33,9 +35,7 @@ public final class SettingsLoaderFactory { * name. This factory method assumes that if the resource name ends * with ".json" then the content should be parsed as JSON, else if * the resource name ends with ".yml" or ".yaml" then the content - * should be parsed as YAML, else if the resource name ends with - * ".properties" then the content should be parsed as properties, - * otherwise default to attempting to parse as JSON. Note that the + * should be parsed as YAML, otherwise throws an exception. Note that the * parsers returned by this method will not accept null-valued * keys. * @@ -59,13 +59,15 @@ public final class SettingsLoaderFactory { * contains an opening and closing brace ('{' and '}') then the * content should be parsed as JSON, else if the underlying content * fails this condition but contains a ':' then the content should - * be parsed as YAML, and otherwise should be parsed as properties. + * be parsed as YAML, and otherwise throws an exception. * Note that the JSON and YAML parsers returned by this method will * accept null-valued keys. * * @param source The underlying settings content. * @return A settings loader. + * @deprecated use {@link #loaderFromXContentType(XContentType)} instead */ + @Deprecated public static SettingsLoader loaderFromSource(String source) { if (source.indexOf('{') != -1 && source.indexOf('}') != -1) { return new JsonSettingsLoader(true); @@ -76,4 +78,20 @@ public final class SettingsLoaderFactory { } } + /** + * Returns a {@link SettingsLoader} based on the {@link XContentType}. Note only {@link XContentType#JSON} and + * {@link XContentType#YAML} are supported + * + * @param xContentType The content type + * @return A settings loader. 
+ */ + public static SettingsLoader loaderFromXContentType(XContentType xContentType) { + if (xContentType == XContentType.JSON) { + return new JsonSettingsLoader(true); + } else if (xContentType == XContentType.YAML) { + return new YamlSettingsLoader(true); + } else { + throw new IllegalArgumentException("unsupported content type [" + xContentType + "]"); + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index 30c62b91c79..d7eaa627a28 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.settings.loader; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -46,14 +47,16 @@ public abstract class XContentSettingsLoader implements SettingsLoader { @Override public Map load(String source) throws IOException { - try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(source)) { + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(NamedXContentRegistry.EMPTY, source)) { return load(parser); } } @Override public Map load(byte[] source) throws IOException { - try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(source)) { + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(NamedXContentRegistry.EMPTY, source)) { return load(parser); } } diff --git a/core/src/main/java/org/elasticsearch/common/text/Text.java b/core/src/main/java/org/elasticsearch/common/text/Text.java index 39eb817fe3c..d895b7c11b0 100644 --- a/core/src/main/java/org/elasticsearch/common/text/Text.java +++ b/core/src/main/java/org/elasticsearch/common/text/Text.java @@ -100,7 +100,10 @@ public final class Text implements Comparable { @Override public boolean equals(Object obj) { - if (obj == null) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { return false; } return bytes().equals(((Text) obj).bytes()); diff --git a/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java index cac3132385f..02f2c627a4c 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -36,7 +36,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray { private byte[][] pages; /** Constructor. 
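A sketch of the new explicit-content-type loading path, which sidesteps the content sniffing of the deprecated `loadFromSource(String)`:

---------------------------------------------------------------------------
Settings s = Settings.builder()
        .loadFromSource("{\"cluster.name\": \"demo\"}", XContentType.JSON)
        .build();
// only JSON and YAML are accepted; any other content type results in an
// IllegalArgumentException from loaderFromXContentType
---------------------------------------------------------------------------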
*/ - public BigByteArray(long size, BigArrays bigArrays, boolean clearOnResize) { + BigByteArray(long size, BigArrays bigArrays, boolean clearOnResize) { super(BYTE_PAGE_SIZE, bigArrays, clearOnResize); this.size = size; pages = new byte[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 4aab593affe..a8b4503bda6 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -35,7 +35,7 @@ final class BigDoubleArray extends AbstractBigArray implements DoubleArray { private long[][] pages; /** Constructor. */ - public BigDoubleArray(long size, BigArrays bigArrays, boolean clearOnResize) { + BigDoubleArray(long size, BigArrays bigArrays, boolean clearOnResize) { super(LONG_PAGE_SIZE, bigArrays, clearOnResize); this.size = size; pages = new long[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java b/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java index 1fa79a9f3db..cf11eba37ae 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java @@ -35,7 +35,7 @@ final class BigFloatArray extends AbstractBigArray implements FloatArray { private int[][] pages; /** Constructor. */ - public BigFloatArray(long size, BigArrays bigArrays, boolean clearOnResize) { + BigFloatArray(long size, BigArrays bigArrays, boolean clearOnResize) { super(INT_PAGE_SIZE, bigArrays, clearOnResize); this.size = size; pages = new int[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java index 4ce5fc7acee..16ca3ada24d 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -35,7 +35,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray { private int[][] pages; /** Constructor. */ - public BigIntArray(long size, BigArrays bigArrays, boolean clearOnResize) { + BigIntArray(long size, BigArrays bigArrays, boolean clearOnResize) { super(INT_PAGE_SIZE, bigArrays, clearOnResize); this.size = size; pages = new int[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 2e3248143b4..cb9b9e6c332 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -35,7 +35,7 @@ final class BigLongArray extends AbstractBigArray implements LongArray { private long[][] pages; /** Constructor. 
*/ - public BigLongArray(long size, BigArrays bigArrays, boolean clearOnResize) { + BigLongArray(long size, BigArrays bigArrays, boolean clearOnResize) { super(LONG_PAGE_SIZE, bigArrays, clearOnResize); this.size = size; pages = new long[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java b/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java index 19a41d3096d..023c710f3aa 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java @@ -35,7 +35,7 @@ final class BigObjectArray extends AbstractBigArray implements ObjectArray private Object[][] pages; /** Constructor. */ - public BigObjectArray(long size, BigArrays bigArrays) { + BigObjectArray(long size, BigArrays bigArrays) { super(OBJECT_PAGE_SIZE, bigArrays, true); this.size = size; pages = new Object[numPages(size)][]; diff --git a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 5171be0ee91..54a49f7e4f2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -226,7 +226,7 @@ public class CollectionUtils { private final List in; private final int distance; - public RotatedList(List list, int distance) { + RotatedList(List list, int distance) { if (distance < 0 || distance >= list.size()) { throw new IllegalArgumentException(); } diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java index 2712aef8233..528982385ac 100644 --- a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -26,6 +26,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -86,7 +87,7 @@ public class IndexFolderUpgrader { void upgrade(final String indexFolderName) throws IOException { for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) { final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolderPath); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexFolderPath); if (indexMetaData != null) { final Index index = indexMetaData.getIndex(); if (needsUpgrade(index, indexFolderName)) { diff --git a/core/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java b/core/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java index 4095f5d7014..a79e8d88be6 100644 --- a/core/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java +++ b/core/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java @@ -161,7 +161,7 @@ public class LongObjectPagedHashMap extends AbstractPagedHashMap implements I } @Override - public final void remove() { + public void remove() { throw new UnsupportedOperationException(); } diff --git 
a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java index d5b44ed4dfb..e0b8aea178c 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.common.util.concurrent; import org.apache.lucene.store.AlreadyClosedException; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 2dad4663dc4..bd99f3b1a47 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -161,7 +161,7 @@ public class EsExecutors { final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; - public EsThreadFactory(String namePrefix) { + EsThreadFactory(String namePrefix) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); group = (s != null) ? s.getThreadGroup() : @@ -189,7 +189,7 @@ public class EsExecutors { ThreadPoolExecutor executor; - public ExecutorScalingQueue() { + ExecutorScalingQueue() { } @Override diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java index 813265f19c4..1b01455c1ca 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java @@ -250,14 +250,14 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { final Priority priority; final long insertionOrder; - public PrioritizedFutureTask(Runnable runnable, Priority priority, T value, long insertionOrder) { + PrioritizedFutureTask(Runnable runnable, Priority priority, T value, long insertionOrder) { super(runnable, value); this.task = runnable; this.priority = priority; this.insertionOrder = insertionOrder; } - public PrioritizedFutureTask(PrioritizedCallable callable, long insertionOrder) { + PrioritizedFutureTask(PrioritizedCallable callable, long insertionOrder) { super(callable); this.task = callable; this.priority = callable.priority(); diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index ca1d364ffd2..d439696b720 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.store.Store; import java.io.Closeable; import java.io.IOException; @@ -34,6 +35,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import 
java.util.stream.Stream; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -115,12 +119,57 @@ public final class ThreadContext implements Closeable, Writeable { return () -> threadLocal.set(context); } + /** * Just like {@link #stashContext()} but no default context is set. + * @param preserveResponseHeaders if set to true the response headers of the restoring thread will be preserved. */ - public StoredContext newStoredContext() { + public StoredContext newStoredContext(boolean preserveResponseHeaders) { final ThreadContextStruct context = threadLocal.get(); - return () -> threadLocal.set(context); + return () -> { + if (preserveResponseHeaders && threadLocal.get() != context) { + threadLocal.set(context.putResponseHeaders(threadLocal.get().responseHeaders)); + } else { + threadLocal.set(context); + } + }; + } + + /** + * Returns a supplier that gathers a {@link #newStoredContext(boolean)} and restores it once the + * returned supplier is invoked. The context returned from the supplier is a stored version of the + * supplier's caller's context that should be restored once the originally gathered context is not needed anymore. + * For instance, this method should be used like this: + * + * <pre>
    +     *     Supplier<ThreadContext.StoredContext> restorable = context.newRestorableContext(true);
    +     *     new Thread() {
    +     *         public void run() {
    +     *             try (ThreadContext.StoredContext ctx = restorable.get()) {
+     *                 // execute with the parent's context and restore the thread's context afterwards
    +     *             }
    +     *         }
    +     *
    +     *     }.start();
+     * </pre>
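A minimal caller-side sketch of the restorable-context pattern above (hypothetical snippet; `threadContext` and `executor` are assumed to be in scope, and `Supplier` is `java.util.function.Supplier`):

---------------------------------------------------------------------------
Supplier<ThreadContext.StoredContext> restorable = threadContext.newRestorableContext(true);
executor.execute(() -> {
    try (ThreadContext.StoredContext ignored = restorable.get()) {
        // runs with the capturing thread's context; because true was passed,
        // response headers added here are preserved when the context is restored
    }
});
---------------------------------------------------------------------------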
    + * + * @param preserveResponseHeaders if set to true the response headers of the restore thread will be preserved. + * @return a restorable context supplier + */ + public Supplier newRestorableContext(boolean preserveResponseHeaders) { + return wrapRestorable(newStoredContext(preserveResponseHeaders)); + } + + /** + * Same as {@link #newRestorableContext(boolean)} but wraps an existing context to restore. + * @param storedContext the context to restore + */ + public Supplier wrapRestorable(StoredContext storedContext) { + return () -> { + StoredContext context = newStoredContext(false); + storedContext.restore(); + return context; + }; } @Override @@ -327,6 +376,26 @@ public final class ThreadContext implements Closeable, Writeable { } } + private ThreadContextStruct putResponseHeaders(Map> headers) { + assert headers != null; + if (headers.isEmpty()) { + return this; + } + final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); + for (Map.Entry> entry : headers.entrySet()) { + String key = entry.getKey(); + final List existingValues = newResponseHeaders.get(key); + if (existingValues != null) { + List newValues = Stream.concat(entry.getValue().stream(), + existingValues.stream()).distinct().collect(Collectors.toList()); + newResponseHeaders.put(key, Collections.unmodifiableList(newValues)); + } else { + newResponseHeaders.put(key, entry.getValue()); + } + } + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders); + } + private ThreadContextStruct putResponse(String key, String value) { assert value != null; @@ -445,7 +514,7 @@ public final class ThreadContext implements Closeable, Writeable { private final ThreadContext.StoredContext ctx; private ContextPreservingRunnable(Runnable in) { - ctx = newStoredContext(); + ctx = newStoredContext(false); this.in = in; } @@ -487,7 +556,7 @@ public final class ThreadContext implements Closeable, Writeable { private ThreadContext.StoredContext threadsOriginalContext = null; private ContextPreservingAbstractRunnable(AbstractRunnable in) { - creatorsContext = newStoredContext(); + creatorsContext = newStoredContext(false); this.in = in; } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java index 64a5fa4c119..91acb267056 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -34,17 +34,9 @@ import java.util.function.BiFunction; /** * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared. */ -public abstract class AbstractObjectParser +public abstract class AbstractObjectParser implements BiFunction, ContextParser { - /** - * Reads an object right from the parser without any context. - */ - @FunctionalInterface - public interface NoContextParser { - T parse(XContentParser p) throws IOException; - } - /** * Declare some field. 
Usually it is easier to use {@link #declareString(BiConsumer, ParseField)} or * {@link #declareObject(BiConsumer, ContextParser, ParseField)} rather than call this directly. @@ -52,11 +44,12 @@ public abstract class AbstractObjectParser void declareField(BiConsumer consumer, ContextParser parser, ParseField parseField, ValueType type); - public void declareField(BiConsumer consumer, NoContextParser parser, ParseField parseField, ValueType type) { + public void declareField(BiConsumer consumer, CheckedFunction parser, + ParseField parseField, ValueType type) { if (parser == null) { throw new IllegalArgumentException("[parser] is required"); } - declareField(consumer, (p, c) -> parser.parse(p), parseField, type); + declareField(consumer, (p, c) -> parser.apply(p), parseField, type); } public void declareObject(BiConsumer consumer, ContextParser objectParser, ParseField field) { @@ -122,7 +115,7 @@ public abstract class AbstractObjectParser consumer, ParseField field) { - NoContextParser bytesParser = p -> { + CheckedFunction bytesParser = p -> { try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.prettyPrint(); builder.copyCurrentStructure(p); diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java index 6e646094d06..82ee94550c1 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; @@ -74,7 +73,7 @@ import java.util.function.Function; * Note: if optional constructor arguments aren't specified then the number of allocations is always the worst case. *

*/ -public final class ConstructingObjectParser extends AbstractObjectParser { +public final class ConstructingObjectParser extends AbstractObjectParser { /** * Consumer that marks a field as a required constructor argument instead of a real object field. */ @@ -236,7 +235,7 @@ public final class ConstructingObjectParser categoryClass; + + /** A name for the entry which is unique within the {@link #categoryClass}. */ + public final ParseField name; + + /** A parser capable of parsing the entry's class. */ + private final ContextParser parser; + + /** Creates a new entry which can be stored by the registry. */ + public Entry(Class categoryClass, ParseField name, CheckedFunction parser) { + this.categoryClass = Objects.requireNonNull(categoryClass); + this.name = Objects.requireNonNull(name); + this.parser = Objects.requireNonNull((p, c) -> parser.apply(p)); + } + /** + * Creates a new entry which can be stored by the registry. + * @deprecated prefer {@link Entry#Entry(Class, ParseField, CheckedFunction)}. Contexts will be removed when possible + */ + @Deprecated + public Entry(Class categoryClass, ParseField name, ContextParser parser) { + this.categoryClass = Objects.requireNonNull(categoryClass); + this.name = Objects.requireNonNull(name); + this.parser = Objects.requireNonNull(parser); + } + } + + private final Map, Map> registry; + + public NamedXContentRegistry(List entries) { + if (entries.isEmpty()) { + registry = emptyMap(); + return; + } + entries = new ArrayList<>(entries); + entries.sort((e1, e2) -> e1.categoryClass.getName().compareTo(e2.categoryClass.getName())); + + Map, Map> registry = new HashMap<>(); + Map parsers = null; + Class currentCategory = null; + for (Entry entry : entries) { + if (currentCategory != entry.categoryClass) { + if (currentCategory != null) { + // we've seen the last of this category, put it into the big map + registry.put(currentCategory, unmodifiableMap(parsers)); + } + parsers = new HashMap<>(); + currentCategory = entry.categoryClass; + } + + for (String name : entry.name.getAllNamesIncludedDeprecated()) { + Object old = parsers.put(name, entry); + if (old != null) { + throw new IllegalArgumentException("NamedXContent [" + currentCategory.getName() + "][" + entry.name + "]" + + " is already registered for [" + old.getClass().getName() + "]," + + " cannot register [" + entry.parser.getClass().getName() + "]"); + } + } + } + // handle the last category + registry.put(currentCategory, unmodifiableMap(parsers)); + + this.registry = unmodifiableMap(registry); + } + + /** + * Parse a named object, throwing an exception if the parser isn't found. Throws an {@link ElasticsearchException} if the + * {@code categoryClass} isn't registered because this is almost always a bug. Throws a {@link UnknownNamedObjectException} if the + * {@code categoryClass} is registered but the {@code name} isn't. + */ + public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { + Map parsers = registry.get(categoryClass); + if (parsers == null) { + if (registry.isEmpty()) { + // The "empty" registry will never work so we throw a better exception as a hint.
+ throw new ElasticsearchException("namedObject is not supported for this parser"); + } + throw new ElasticsearchException("Unknown namedObject category [" + categoryClass.getName() + "]"); + } + Entry entry = parsers.get(name); + if (entry == null) { + throw new UnknownNamedObjectException(parser.getTokenLocation(), categoryClass, name); + } + if (false == entry.name.match(name)) { + /* Note that this shouldn't happen because we already looked up the entry using the names but we need to call `match` anyway + * because it is responsible for logging deprecation warnings. */ + throw new ParsingException(parser.getTokenLocation(), + "Unknown " + categoryClass.getSimpleName() + " [" + name + "]: Parser didn't match"); + } + return categoryClass.cast(entry.parser.parse(parser, context)); + } + + /** + * Thrown when {@link NamedXContentRegistry#parseNamedObject(Class, String, XContentParser, Object)} is called with an unregistered + * name. When this bubbles up to the rest layer it is converted into a response with {@code 400 BAD REQUEST} status. + */ + public static class UnknownNamedObjectException extends ParsingException { + private final String categoryClass; + private final String name; + + public UnknownNamedObjectException(XContentLocation contentLocation, Class categoryClass, + String name) { + super(contentLocation, "Unknown " + categoryClass.getSimpleName() + " [" + name + "]"); + this.categoryClass = requireNonNull(categoryClass, "categoryClass is required").getName(); + this.name = requireNonNull(name, "name is required"); + } + + /** + * Read from a stream. + */ + public UnknownNamedObjectException(StreamInput in) throws IOException { + super(in); + categoryClass = in.readString(); + name = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(categoryClass); + out.writeString(name); + } + + /** + * Category class that was missing a parser. This is a String instead of a class because the class might not be on the classpath + * of all nodes or it might be exclusive to a plugin or something. + */ + public String getCategoryClass() { + return categoryClass; + } + + /** + * Name of the missing parser. + */ + public String getName() { + return name; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 8bd3b634d69..0a7e71c6f06 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -20,8 +20,6 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import java.io.IOException; @@ -68,7 +66,7 @@ import static org.elasticsearch.common.xcontent.XContentParser.Token.VALUE_STRIN * It's highly recommended to use the high level declare methods like {@link #declareString(BiConsumer, ParseField)} instead of * {@link #declareField} which can be used to implement exceptional parsing operations not covered by the high level methods. */ -public final class ObjectParser extends AbstractObjectParser { +public final class ObjectParser extends AbstractObjectParser { /** * Adapts an array (or varargs) setter into a list setter.
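For instance, assuming the adapter described here is `ObjectParser.fromList` and given a hypothetical `MyBean` with a `setValues(String[])` setter, a sketch:

---------------------------------------------------------------------------
// the resulting consumer copies the parsed List into a String[] before
// calling the array-based setter
BiConsumer<MyBean, List<String>> setter = ObjectParser.fromList(String.class, MyBean::setValues);
---------------------------------------------------------------------------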
*/ @@ -122,7 +120,7 @@ public final class ObjectParser fieldParser = null; + FieldParser fieldParser = null; String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -167,7 +165,7 @@ public final class ObjectParser fieldParser, String currentFieldName, Value value, Context context) + private void parseArray(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { assert parser.currentToken() == XContentParser.Token.START_ARRAY : "Token was: " + parser.currentToken(); parseValue(parser, fieldParser, currentFieldName, value, context); } - private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) + private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { try { fieldParser.parser.parse(parser, value, context); @@ -372,7 +370,7 @@ public final class ObjectParser fieldParser, String currentFieldName, Value value, Context context) + private void parseSub(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { final XContentParser.Token token = parser.currentToken(); switch (token) { @@ -396,28 +394,28 @@ public final class ObjectParser parser = fieldParserMap.get(fieldName); + FieldParser parser = fieldParserMap.get(fieldName); if (parser == null && false == ignoreUnknownFields) { throw new IllegalArgumentException("[" + name + "] unknown field [" + fieldName + "], parser not found"); } return parser; } - public static class FieldParser { - private final Parser parser; + private class FieldParser { + private final Parser parser; private final EnumSet supportedTokens; private final ParseField parseField; private final ValueType type; - public FieldParser(Parser parser, EnumSet supportedTokens, ParseField parseField, ValueType type) { + FieldParser(Parser parser, EnumSet supportedTokens, ParseField parseField, ValueType type) { this.parser = parser; this.supportedTokens = supportedTokens; this.parseField = parseField; this.type = type; } - public void assertSupports(String parserName, XContentParser.Token token, String currentFieldName, ParseFieldMatcher matcher) { - if (matcher.match(currentFieldName, parseField) == false) { + void assertSupports(String parserName, XContentParser.Token token, String currentFieldName) { + if (parseField.match(currentFieldName) == false) { throw new IllegalStateException("[" + parserName + "] parsefield doesn't accept: " + currentFieldName); } if (supportedTokens.contains(token) == false) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java index 81f5b995c18..0282fba7646 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.collect.Tuple; @@ -75,12 +74,11 @@ public class ParseFieldRegistry { * Lookup a value from the registry by name while checking that the name matches the ParseField. 
* * @param name The name of the thing to look up. - * @param parseFieldMatcher to build nice error messages. * @return The value being looked up. Never null. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookup(String name, ParseFieldMatcher parseFieldMatcher, XContentLocation xContentLocation) { - T value = lookupReturningNullIfNotFound(name, parseFieldMatcher); + public T lookup(String name, XContentLocation xContentLocation) { + T value = lookupReturningNullIfNotFound(name); if (value == null) { throw new ParsingException(xContentLocation, "no [" + registryName + "] registered for [" + name + "]"); } @@ -91,19 +89,17 @@ public class ParseFieldRegistry { * Lookup a value from the registry by name while checking that the name matches the ParseField. * * @param name The name of the thing to look up. - * @param parseFieldMatcher The parseFieldMatcher. This is used to resolve the {@link ParseFieldMatcher} and to build nice - * error messages. * @return The value being looked up or null if it wasn't found. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookupReturningNullIfNotFound(String name, ParseFieldMatcher parseFieldMatcher) { + public T lookupReturningNullIfNotFound(String name) { Tuple parseFieldAndValue = registry.get(name); if (parseFieldAndValue == null) { return null; } ParseField parseField = parseFieldAndValue.v1(); T value = parseFieldAndValue.v2(); - boolean match = parseFieldMatcher.match(name, parseField); + boolean match = parseField.match(name); //this is always expected to match, ParseField is useful for deprecation warnings etc. here assert match : "ParseField did not match registered name [" + name + "][" + registryName + "]"; return value; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContentObject.java similarity index 94% rename from core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContent.java rename to core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContentObject.java index f22aa39613f..ba6ccdfffad 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/StatusToXContentObject.java @@ -24,7 +24,7 @@ import org.elasticsearch.rest.RestStatus; * Objects that can both render themselves in as json/yaml/etc and can provide a {@link RestStatus} for their response. Usually should be * implemented by top level responses sent back to users from REST endpoints. */ -public interface StatusToXContent extends ToXContent { +public interface StatusToXContentObject extends ToXContentObject { /** * Returns the REST status to make sure it is returned correctly diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java index 01111fa940a..3006363a4dd 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java @@ -26,6 +26,8 @@ import java.util.Map; /** * An interface allowing to transfer an object to "XContent" using an {@link XContentBuilder}. + * The output may or may not be a value object. 
Objects implementing {@link ToXContentObject} output a valid value + * but those that don't may or may not require emitting a startObject and an endObject. */ public interface ToXContent { @@ -126,4 +128,8 @@ public interface ToXContent { } XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; + + default boolean isFragment() { + return true; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsTrueParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java similarity index 60% rename from test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsTrueParser.java rename to core/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java index 9dfc3be57e3..ed9aa304719 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsTrueParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java @@ -16,19 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.test.rest.yaml.parser; -import org.elasticsearch.test.rest.yaml.section.IsTrueAssertion; - -import java.io.IOException; +package org.elasticsearch.common.xcontent; /** - * Parser for is_true assert sections + * An interface allowing to transfer an object to "XContent" using an {@link XContentBuilder}. + * The difference between {@link ToXContent} and {@link ToXContentObject} is that the former may output a fragment that + * requires to start and end a new anonymous object externally, while the latter guarantees that what gets printed + * out is fully valid syntax without any external addition. */ -public class IsTrueParser implements ClientYamlTestFragmentParser { +public interface ToXContentObject extends ToXContent { @Override - public IsTrueAssertion parse(ClientYamlTestSuiteParseContext parseContext) throws IOException, ClientYamlTestParseException { - return new IsTrueAssertion(parseContext.parser().getTokenLocation(), parseContext.parseField()); + default boolean isFragment() { + return false; } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java index 72210f09d9b..879b9e9d723 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -52,7 +52,7 @@ public interface XContent { */ static boolean isStrictDuplicateDetectionEnabled() { // Don't allow duplicate keys in JSON content by default but let the user opt out - return Booleans.parseBooleanExact(System.getProperty("es.xcontent.strict_duplicate_detection", "true")); + return Booleans.parseBoolean(System.getProperty("es.xcontent.strict_duplicate_detection", "true")); } /** @@ -83,31 +83,31 @@ public interface XContent { /** * Creates a parser over the provided string content. */ - XContentParser createParser(String content) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException; /** * Creates a parser over the provided input stream. */ - XContentParser createParser(InputStream is) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException; /** * Creates a parser over the provided bytes. 
*/ - XContentParser createParser(byte[] data) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException; /** * Creates a parser over the provided bytes. */ - XContentParser createParser(byte[] data, int offset, int length) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException; /** * Creates a parser over the provided bytes. */ - XContentParser createParser(BytesReference bytes) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException; /** * Creates a parser over the provided reader. */ - XContentParser createParser(Reader reader) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index df34ec726fd..189e9d3c8d5 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -964,21 +964,60 @@ public final class XContentBuilder implements BytesStream, Releasable, Flushable // Raw fields ////////////////////////////////// + /** + * Writes a raw field with the value taken from the bytes in the stream + * @deprecated use {@link #rawField(String, InputStream, XContentType)} to avoid content type auto-detection + */ + @Deprecated public XContentBuilder rawField(String name, InputStream value) throws IOException { generator.writeRawField(name, value); return this; } + /** + * Writes a raw field with the value taken from the bytes in the stream + */ + public XContentBuilder rawField(String name, InputStream value, XContentType contentType) throws IOException { + generator.writeRawField(name, value, contentType); + return this; + } + + /** + * Writes a raw field with the given bytes as the value + * @deprecated use {@link #rawField(String name, BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated public XContentBuilder rawField(String name, BytesReference value) throws IOException { generator.writeRawField(name, value); return this; } + /** + * Writes a raw field with the given bytes as the value + */ + public XContentBuilder rawField(String name, BytesReference value, XContentType contentType) throws IOException { + generator.writeRawField(name, value, contentType); + return this; + } + + /** + * Writes a value with the source coming directly from the bytes + * @deprecated use {@link #rawValue(BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated public XContentBuilder rawValue(BytesReference value) throws IOException { generator.writeRawValue(value); return this; } + /** + * Writes a value with the source coming directly from the bytes + */ + public XContentBuilder rawValue(BytesReference value, XContentType contentType) throws IOException { + generator.writeRawValue(value, contentType); + return this; + } + public XContentBuilder copyCurrentStructure(XContentParser parser) throws IOException { generator.copyCurrentStructure(parser); return this; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java index 478f3a8a08f..60a188ca6ce 100644 --- 
a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java @@ -86,12 +86,42 @@ public interface XContentGenerator extends Closeable, Flushable { void writeBinary(byte[] value, int offset, int length) throws IOException; + /** + * Writes a raw field with the value taken from the bytes in the stream + * @deprecated use {@link #writeRawField(String, InputStream, XContentType)} to avoid content type auto-detection + */ + @Deprecated void writeRawField(String name, InputStream value) throws IOException; + /** + * Writes a raw field with the value taken from the bytes in the stream + */ + void writeRawField(String name, InputStream value, XContentType xContentType) throws IOException; + + /** + * Writes a raw field with the given bytes as the value + * @deprecated use {@link #writeRawField(String, BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated void writeRawField(String name, BytesReference value) throws IOException; + /** + * Writes a raw field with the given bytes as the value + */ + void writeRawField(String name, BytesReference value, XContentType xContentType) throws IOException; + + /** + * Writes a value with the source coming directly from the bytes + * @deprecated use {@link #writeRawValue(BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated void writeRawValue(BytesReference value) throws IOException; + /** + * Writes a value with the source coming directly from the bytes + */ + void writeRawValue(BytesReference value, XContentType xContentType) throws IOException; + void copyCurrentStructure(XContentParser parser) throws IOException; /** diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 1625289e528..dd7508280d9 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -41,23 +41,61 @@ import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; @SuppressWarnings("unchecked") public class XContentHelper { - public static XContentParser createParser(BytesReference bytes) throws IOException { + /** + * Creates a parser based on the bytes provided + * @deprecated use {@link #createParser(NamedXContentRegistry, BytesReference, XContentType)} to avoid content type auto-detection + */ + @Deprecated + public static XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { Compressor compressor = CompressorFactory.compressor(bytes); if (compressor != null) { InputStream compressedInput = compressor.streamInput(bytes.streamInput()); if (compressedInput.markSupported() == false) { compressedInput = new BufferedInputStream(compressedInput); } - XContentType contentType = XContentFactory.xContentType(compressedInput); - return XContentFactory.xContent(contentType).createParser(compressedInput); + final XContentType contentType = XContentFactory.xContentType(compressedInput); + return XContentFactory.xContent(contentType).createParser(xContentRegistry, compressedInput); } else { - return XContentFactory.xContent(bytes).createParser(bytes.streamInput()); + return XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes.streamInput()); } } - public static Tuple> convertToMap(BytesReference bytes, boolean ordered) throws 
ElasticsearchParseException { + /** + * Creates a parser for the bytes using the supplied content-type + */ + public static XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes, + XContentType xContentType) throws IOException { + Objects.requireNonNull(xContentType); + Compressor compressor = CompressorFactory.compressor(bytes); + if (compressor != null) { + InputStream compressedInput = compressor.streamInput(bytes.streamInput()); + if (compressedInput.markSupported() == false) { + compressedInput = new BufferedInputStream(compressedInput); + } + return XContentFactory.xContent(xContentType).createParser(xContentRegistry, compressedInput); + } else { + return xContentType.xContent().createParser(xContentRegistry, bytes.streamInput()); + } + } + + /** + * Converts the given bytes into a map that is optionally ordered. + * @deprecated this method relies on auto-detection of content type. Use {@link #convertToMap(BytesReference, boolean, XContentType)} + * instead with the proper {@link XContentType} + */ + @Deprecated + public static Tuple> convertToMap(BytesReference bytes, boolean ordered) + throws ElasticsearchParseException { + return convertToMap(bytes, ordered, null); + } + + /** + * Converts the given bytes into a map that is optionally ordered. The provided {@link XContentType} must be non-null. + */ + public static Tuple> convertToMap(BytesReference bytes, boolean ordered, XContentType xContentType) + throws ElasticsearchParseException { try { - XContentType contentType; + final XContentType contentType; InputStream input; Compressor compressor = CompressorFactory.compressor(bytes); if (compressor != null) { @@ -65,34 +103,68 @@ public class XContentHelper { if (compressedStreamInput.markSupported() == false) { compressedStreamInput = new BufferedInputStream(compressedStreamInput); } - contentType = XContentFactory.xContentType(compressedStreamInput); input = compressedStreamInput; } else { - contentType = XContentFactory.xContentType(bytes); input = bytes.streamInput(); } - try (XContentParser parser = XContentFactory.xContent(contentType).createParser(input)) { - if (ordered) { - return Tuple.tuple(contentType, parser.mapOrdered()); - } else { - return Tuple.tuple(contentType, parser.map()); - } - } + contentType = xContentType != null ? xContentType : XContentFactory.xContentType(input); + return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(XContentFactory.xContent(contentType), input, ordered)); } catch (IOException e) { throw new ElasticsearchParseException("Failed to parse content to map", e); } } + /** + * Convert a string in some {@link XContent} format to a {@link Map}. Throws an {@link ElasticsearchParseException} if there is any + * error. + */ + public static Map convertToMap(XContent xContent, String string, boolean ordered) throws ElasticsearchParseException { + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, string)) { + return ordered ? parser.mapOrdered() : parser.map(); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse content to map", e); + } + } + + /** + * Convert a string in some {@link XContent} format to a {@link Map}. Throws an {@link ElasticsearchParseException} if there is any + * error. Note that unlike {@link #convertToMap(BytesReference, boolean)}, this doesn't automatically uncompress the input. 
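A hedged sketch of the string-based conversion just described (the literal JSON here is purely illustrative):

---------------------------------------------------------------------------
// convert a JSON string without a registry; namedObject is never used here
Map<String, Object> map = XContentHelper.convertToMap(JsonXContent.jsonXContent, "{\"k\":\"v\"}", true);
---------------------------------------------------------------------------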
+ */ + public static Map convertToMap(XContent xContent, InputStream input, boolean ordered) + throws ElasticsearchParseException { + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, input)) { + return ordered ? parser.mapOrdered() : parser.map(); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse content to map", e); + } + } + + @Deprecated public static String convertToJson(BytesReference bytes, boolean reformatJson) throws IOException { return convertToJson(bytes, reformatJson, false); } + @Deprecated public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException { - XContentType xContentType = XContentFactory.xContentType(bytes); + return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes)); + } + + public static String convertToJson(BytesReference bytes, boolean reformatJson, XContentType xContentType) throws IOException { + return convertToJson(bytes, reformatJson, false, xContentType); + } + + public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint, XContentType xContentType) + throws IOException { + Objects.requireNonNull(xContentType); if (xContentType == XContentType.JSON && !reformatJson) { return bytes.utf8ToString(); } - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(bytes.streamInput())) { + + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY, + bytes.streamInput())) { parser.nextToken(); XContentBuilder builder = XContentFactory.jsonBuilder(); if (prettyPrint) { @@ -191,7 +263,6 @@ public class XContentHelper { * Merges the defaults provided as the second parameter into the content of the first. Only does recursive merge * for inner maps. */ - @SuppressWarnings({"unchecked"}) public static void mergeDefaults(Map content, Map defaults) { for (Map.Entry defaultEntry : defaults.entrySet()) { if (!content.containsKey(defaultEntry.getKey())) { @@ -255,33 +326,36 @@ public class XContentHelper { return true; } - public static void copyCurrentStructure(XContentGenerator generator, XContentParser parser) throws IOException { + /** + * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. 
+ */ + public static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); // Let's handle field-name separately first if (token == XContentParser.Token.FIELD_NAME) { - generator.writeFieldName(parser.currentName()); + destination.writeFieldName(parser.currentName()); token = parser.nextToken(); // fall-through to copy the associated value } switch (token) { case START_ARRAY: - generator.writeStartArray(); + destination.writeStartArray(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - copyCurrentStructure(generator, parser); + copyCurrentStructure(destination, parser); } - generator.writeEndArray(); + destination.writeEndArray(); break; case START_OBJECT: - generator.writeStartObject(); + destination.writeStartObject(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - copyCurrentStructure(generator, parser); + copyCurrentStructure(destination, parser); } - generator.writeEndObject(); + destination.writeEndObject(); break; default: // others are simple: - copyCurrentEvent(generator, parser); + copyCurrentEvent(destination, parser); } } @@ -339,7 +413,10 @@ public class XContentHelper { /** * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using * {@link XContentBuilder#rawField(String, org.elasticsearch.common.bytes.BytesReference)}. + * @deprecated use {@link #writeRawField(String, BytesReference, XContentType, XContentBuilder, Params)} to avoid content type + * auto-detection */ + @Deprecated public static void writeRawField(String field, BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException { Compressor compressor = CompressorFactory.compressor(source); if (compressor != null) { @@ -349,4 +426,38 @@ public class XContentHelper { builder.rawField(field, source); } } + + /** + * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using + * {@link XContentBuilder#rawField(String, org.elasticsearch.common.bytes.BytesReference, XContentType)}. + */ + public static void writeRawField(String field, BytesReference source, XContentType xContentType, XContentBuilder builder, + ToXContent.Params params) throws IOException { + Objects.requireNonNull(xContentType); + Compressor compressor = CompressorFactory.compressor(source); + if (compressor != null) { + InputStream compressedStreamInput = compressor.streamInput(source.streamInput()); + builder.rawField(field, compressedStreamInput, xContentType); + } else { + builder.rawField(field, source, xContentType); + } + } + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link XContentType}. Wraps the output into a new anonymous object. 
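For example, a minimal sketch (where `response` stands in for any `ToXContent` implementation in scope):

---------------------------------------------------------------------------
// fragments (isFragment() == true) get wrapped in an anonymous object;
// ToXContentObject implementations are emitted as-is
BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false);
---------------------------------------------------------------------------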
+ */ + public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean humanReadable) throws IOException { + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.humanReadable(humanReadable); + if (toXContent.isFragment()) { + builder.startObject(); + } + toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + if (toXContent.isFragment()) { + builder.endObject(); + } + return builder.bytes(); + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index e5ab2a9f4c7..fc0b5c0f4f2 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -33,7 +33,7 @@ import java.util.Map; * *
      *     XContentType xContentType = XContentType.JSON;
    - *     XContentParser parser = xContentType.xContent().createParser("{\"key\" : \"value\"}");
    + *     XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, "{\"key\" : \"value\"}");
* </pre>
    */ public interface XContentParser extends Releasable { @@ -201,16 +201,32 @@ public interface XContentParser extends Releasable { double doubleValue() throws IOException; + /** + * @return true iff the current value is either boolean (true or false) or one of "false", "true". + */ + boolean isBooleanValue() throws IOException; + + boolean booleanValue() throws IOException; + + // TODO #22298: Remove this method and replace all call sites with #isBooleanValue() /** * returns true if the current value is boolean in nature. * values that are considered booleans: * - boolean value (true/false) * - numeric integers (=0 is considered as false, !=0 is true) * - one of the following strings: "true","false","on","off","yes","no","1","0" + * + * @deprecated Just present for providing backwards compatibility. Use {@link #isBooleanValue()} instead. */ - boolean isBooleanValue() throws IOException; + @Deprecated + boolean isBooleanValueLenient() throws IOException; - boolean booleanValue() throws IOException; + // TODO #22298: Remove this method and replace all call sites with #booleanValue() + /** + * @deprecated Just present for providing backwards compatibility. Use {@link #booleanValue()} instead. + */ + @Deprecated + boolean booleanValueLenient() throws IOException; /** * Reads a plain binary value that was written via one of the following methods: @@ -249,5 +265,16 @@ public interface XContentParser extends Releasable { */ XContentLocation getTokenLocation(); + // TODO remove context entirely when it isn't needed + /** + * Parse an object by name. + */ + T namedObject(Class categoryClass, String name, Object context) throws IOException; + + /** + * The registry used to resolve {@link #namedObject(Class, String, Object)}. Use this when building a sub-parser from this parser. 
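A sketch of both new hooks (here `MyCategory`, `context`, and `bytes` are hypothetical stand-ins):

---------------------------------------------------------------------------
// resolve a named object that was registered for MyCategory
MyCategory parsed = parser.namedObject(MyCategory.class, "my_name", context);

// build a sub-parser that can still resolve named objects
XContentParser sub = XContentType.JSON.xContent()
        .createParser(parser.getXContentRegistry(), bytes);
---------------------------------------------------------------------------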
+ */ + NamedXContentRegistry getXContentRegistry(); + boolean isClosed(); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java index a2180152444..fec83eefbdf 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; @@ -35,32 +36,17 @@ public final class XContentParserUtils { private XContentParserUtils() { } - /** - * Makes sure that current token is of type {@link XContentParser.Token#FIELD_NAME} - * - * @return the token - * @throws ParsingException if the token is not of type {@link XContentParser.Token#FIELD_NAME} - */ - public static Token ensureFieldName(Token token, Supplier location) throws IOException { - return ensureType(Token.FIELD_NAME, token, location); - } - /** * Makes sure that the current token is of type {@link XContentParser.Token#FIELD_NAME} and the field name is equal to the provided one - * - * @return the token - * @throws ParsingException if the token is not of type {@link XContentParser.Token#FIELD_NAME} or is not equal to the given - * field name + * @throws ParsingException if the token is not of type {@link XContentParser.Token#FIELD_NAME} or is not equal to the given field name */ - public static Token ensureFieldName(XContentParser parser, Token token, String fieldName) throws IOException { - Token t = ensureType(Token.FIELD_NAME, token, parser::getTokenLocation); - - String current = parser.currentName() != null ?
parser.currentName() : ""; - if (current.equals(fieldName) == false) { + public static void ensureFieldName(XContentParser parser, Token token, String fieldName) throws IOException { + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); + String currentName = parser.currentName(); + if (currentName.equals(fieldName) == false) { String message = "Failed to parse object: expecting field with name [%s] but found [%s]"; - throw new ParsingException(parser.getTokenLocation(), String.format(Locale.ROOT, message, fieldName, current)); + throw new ParsingException(parser.getTokenLocation(), String.format(Locale.ROOT, message, fieldName, currentName)); } - return t; } /** @@ -72,16 +58,53 @@ public final class XContentParserUtils { } /** - * Makes sure that current token is of the expected type + * @throws ParsingException with an "unknown token found" reason + */ + public static void throwUnknownToken(XContentParser.Token token, XContentLocation location) { + String message = "Failed to parse object: unexpected token [%s] found"; + throw new ParsingException(location, String.format(Locale.ROOT, message, token)); + } + + /** + * Makes sure that the provided token is of the expected type * - * @return the token * @throws ParsingException if the token is not equal to the expected type */ - private static Token ensureType(Token expected, Token current, Supplier location) { - if (current != expected) { + public static void ensureExpectedToken(Token expected, Token actual, Supplier location) { + if (actual != expected) { String message = "Failed to parse object: expecting token of type [%s] but found [%s]"; - throw new ParsingException(location.get(), String.format(Locale.ROOT, message, expected, current)); + throw new ParsingException(location.get(), String.format(Locale.ROOT, message, expected, actual)); } - return current; + } + + /** + * Parse the current token depending on its token type. The following token types will be + * parsed by the corresponding parser methods:
+ *     <ul>
+ *     <li>XContentParser.Token.VALUE_STRING: parser.text()</li>
+ *     <li>XContentParser.Token.VALUE_NUMBER: parser.numberValue()</li>
+ *     <li>XContentParser.Token.VALUE_BOOLEAN: parser.booleanValue()</li>
+ *     <li>XContentParser.Token.VALUE_EMBEDDED_OBJECT: parser.binaryValue()</li>
+ *     </ul>
+ *
+ * @throws ParsingException if the token is none of the allowed values
+ */ + public static Object parseStoredFieldsValue(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + Object value = null; + if (token == XContentParser.Token.VALUE_STRING) { + //binary values will be parsed back and returned as base64 strings when reading from json and yaml + value = parser.text(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + value = parser.numberValue(); + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + value = parser.booleanValue(); + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + //binary values will be parsed back and returned as BytesArray when reading from cbor and smile + value = new BytesArray(parser.binaryValue()); + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + return value; } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java index ddd736e0d00..8e3c2982704 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; import java.util.Locale; +import java.util.Objects; /** * The content type of {@link org.elasticsearch.common.xcontent.XContent}. @@ -40,7 +41,7 @@ public enum XContentType implements Writeable { */ JSON(0) { @Override - protected String mediaTypeWithoutParameters() { + public String mediaTypeWithoutParameters() { return "application/json"; } @@ -64,7 +65,7 @@ public enum XContentType implements Writeable { */ SMILE(1) { @Override - protected String mediaTypeWithoutParameters() { + public String mediaTypeWithoutParameters() { return "application/smile"; } @@ -83,7 +84,7 @@ public enum XContentType implements Writeable { */ YAML(2) { @Override - protected String mediaTypeWithoutParameters() { + public String mediaTypeWithoutParameters() { return "application/yaml"; } @@ -102,7 +103,7 @@ public enum XContentType implements Writeable { */ CBOR(3) { @Override - protected String mediaTypeWithoutParameters() { + public String mediaTypeWithoutParameters() { return "application/cbor"; } @@ -117,12 +118,18 @@ public enum XContentType implements Writeable { } }; + /** + * Accepts either a format string, which is equivalent to {@link XContentType#shortName()}, or a media type that optionally has + * parameters, and attempts to match the value to an {@link XContentType}. The comparisons are done in lower case format and this method + * also supports a wildcard accept for {@code application/*}. This method can be used to parse the {@code Accept} HTTP header or a + * format query string parameter. This method will return {@code null} if no match is found. + */ public static XContentType fromMediaTypeOrFormat(String mediaType) { if (mediaType == null) { return null; } for (XContentType type : values()) { - if (isSameMediaTypeAs(mediaType, type)) { + if (isSameMediaTypeOrFormatAs(mediaType, type)) { return type; } } @@ -133,7 +140,22 @@ return null; } - private static boolean isSameMediaTypeAs(String stringType, XContentType type) { + /** + * Attempts to match the given media type with the known {@link XContentType} values. This match is done in a case-insensitive manner.
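To illustrate the matching rules of the two methods (hypothetical inputs):

---------------------------------------------------------------------------
XContentType.fromMediaTypeOrFormat("application/json;charset=utf-8"); // JSON, parameters are tolerated
XContentType.fromMediaTypeOrFormat("yaml");                           // YAML, matched via shortName()
XContentType.fromMediaType("application/cbor");                       // CBOR, exact media type without parameters
---------------------------------------------------------------------------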
+ * The provided media type should not include any parameters. This method is suitable for parsing part of the {@code Content-Type} + * HTTP header. This method will return {@code null} if no match is found + */ + public static XContentType fromMediaType(String mediaType) { + final String lowercaseMediaType = Objects.requireNonNull(mediaType, "mediaType cannot be null").toLowerCase(Locale.ROOT); + for (XContentType type : values()) { + if (type.mediaTypeWithoutParameters().equals(lowercaseMediaType)) { + return type; + } + } + return null; + } + + private static boolean isSameMediaTypeOrFormatAs(String stringType, XContentType type) { return type.mediaTypeWithoutParameters().equalsIgnoreCase(stringType) || stringType.toLowerCase(Locale.ROOT).startsWith(type.mediaTypeWithoutParameters().toLowerCase(Locale.ROOT) + ";") || type.shortName().equalsIgnoreCase(stringType); @@ -157,7 +179,7 @@ public enum XContentType implements Writeable { public abstract XContent xContent(); - protected abstract String mediaTypeWithoutParameters(); + public abstract String mediaTypeWithoutParameters(); public static XContentType readFrom(StreamInput in) throws IOException { int index = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index d79173cfc2b..56435fd364b 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -26,6 +26,7 @@ import com.fasterxml.jackson.dataformat.cbor.CBORFactory; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -78,33 +79,33 @@ public class CborXContent implements XContent { } @Override - public XContentParser createParser(String content) throws IOException { - return new CborXContentParser(cborFactory.createParser(new FastStringReader(content))); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException { + return new CborXContentParser(xContentRegistry, cborFactory.createParser(new FastStringReader(content))); } @Override - public XContentParser createParser(InputStream is) throws IOException { - return new CborXContentParser(cborFactory.createParser(is)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException { + return new CborXContentParser(xContentRegistry, cborFactory.createParser(is)); } @Override - public XContentParser createParser(byte[] data) throws IOException { - return new CborXContentParser(cborFactory.createParser(data)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException { + return new CborXContentParser(xContentRegistry, cborFactory.createParser(data)); } @Override - public XContentParser createParser(byte[] data, int offset, int length) throws IOException { - return new CborXContentParser(cborFactory.createParser(data, offset, length)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException { + return new 
CborXContentParser(xContentRegistry, cborFactory.createParser(data, offset, length)); } @Override - public XContentParser createParser(BytesReference bytes) throws IOException { - return createParser(bytes.streamInput()); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { + return createParser(xContentRegistry, bytes.streamInput()); } @Override - public XContentParser createParser(Reader reader) throws IOException { - return new CborXContentParser(cborFactory.createParser(reader)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) throws IOException { + return new CborXContentParser(xContentRegistry, cborFactory.createParser(reader)); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java index e63a928109d..119cb5c98c6 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java @@ -20,20 +20,14 @@ package org.elasticsearch.common.xcontent.cbor; import com.fasterxml.jackson.core.JsonGenerator; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentGenerator; import java.io.OutputStream; -import java.util.Collections; import java.util.Set; public class CborXContentGenerator extends JsonXContentGenerator { - public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) { - this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet()); - } - public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set includes, Set excludes) { super(jsonGenerator, os, includes, excludes); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java index 772a5322cc7..61b4886420f 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java @@ -20,13 +20,15 @@ package org.elasticsearch.common.xcontent.cbor; import com.fasterxml.jackson.core.JsonParser; + +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; public class CborXContentParser extends JsonXContentParser { - public CborXContentParser(JsonParser parser) { - super(parser); + public CborXContentParser(NamedXContentRegistry xContentRegistry, JsonParser parser) { + super(xContentRegistry, parser); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java index 1b0b351e6ef..2e4393723e0 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java @@ -25,6 +25,7 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import 
org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -79,32 +80,32 @@ public class JsonXContent implements XContent { } @Override - public XContentParser createParser(String content) throws IOException { - return new JsonXContentParser(jsonFactory.createParser(new FastStringReader(content))); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException { + return new JsonXContentParser(xContentRegistry, jsonFactory.createParser(new FastStringReader(content))); } @Override - public XContentParser createParser(InputStream is) throws IOException { - return new JsonXContentParser(jsonFactory.createParser(is)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException { + return new JsonXContentParser(xContentRegistry, jsonFactory.createParser(is)); } @Override - public XContentParser createParser(byte[] data) throws IOException { - return new JsonXContentParser(jsonFactory.createParser(data)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException { + return new JsonXContentParser(xContentRegistry, jsonFactory.createParser(data)); } @Override - public XContentParser createParser(byte[] data, int offset, int length) throws IOException { - return new JsonXContentParser(jsonFactory.createParser(data, offset, length)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException { + return new JsonXContentParser(xContentRegistry, jsonFactory.createParser(data, offset, length)); } @Override - public XContentParser createParser(BytesReference bytes) throws IOException { - return createParser(bytes.streamInput()); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { + return createParser(xContentRegistry, bytes.streamInput()); } @Override - public XContentParser createParser(Reader reader) throws IOException { - return new JsonXContentParser(jsonFactory.createParser(reader)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) throws IOException { + return new JsonXContentParser(xContentRegistry, jsonFactory.createParser(reader)); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 763fac4c6a3..1e09f8334f7 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -31,6 +31,7 @@ import com.fasterxml.jackson.core.util.JsonGeneratorDelegate; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -43,7 +44,6 @@ import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.Collections; import java.util.Objects; import java.util.Set; @@ -72,10 +72,6 @@ public class JsonXContentGenerator 
implements XContentGenerator { private static final DefaultPrettyPrinter.Indenter INDENTER = new DefaultIndenter(" ", LF.getValue()); private boolean prettyPrint = false; - public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) { - this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet()); - } - public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set includes, Set excludes) { Objects.requireNonNull(includes, "Including filters must not be null"); Objects.requireNonNull(excludes, "Excluding filters must not be null"); @@ -311,8 +307,14 @@ public class JsonXContentGenerator implements XContentGenerator { if (contentType == null) { throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed"); } + writeRawField(name, content, contentType); + } + + @Override + public void writeRawField(String name, InputStream content, XContentType contentType) throws IOException { if (mayWriteRawData(contentType) == false) { - try (XContentParser parser = XContentFactory.xContent(contentType).createParser(content)) { + // EMPTY is safe here because we never call namedObject when writing raw data + try (XContentParser parser = XContentFactory.xContent(contentType).createParser(NamedXContentRegistry.EMPTY, content)) { parser.nextToken(); writeFieldName(name); copyCurrentStructure(parser); @@ -331,6 +333,11 @@ public class JsonXContentGenerator implements XContentGenerator { if (contentType == null) { throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed"); } + writeRawField(name, content, contentType); + } + + @Override + public final void writeRawField(String name, BytesReference content, XContentType contentType) throws IOException { if (mayWriteRawData(contentType) == false) { writeFieldName(name); copyRawValue(content, contentType.xContent()); @@ -348,6 +355,11 @@ public class JsonXContentGenerator implements XContentGenerator { if (contentType == null) { throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed"); } + writeRawValue(content, contentType); + } + + @Override + public final void writeRawValue(BytesReference content, XContentType contentType) throws IOException { if (mayWriteRawData(contentType) == false) { copyRawValue(content, contentType.xContent()); } else { @@ -378,8 +390,9 @@ public class JsonXContentGenerator implements XContentGenerator { } protected void copyRawValue(BytesReference content, XContent xContent) throws IOException { + // EMPTY is safe here because we never call namedObject try (StreamInput input = content.streamInput(); - XContentParser parser = xContent.createParser(input)) { + XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, input)) { copyCurrentStructure(parser); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index f7ed46a6496..e5c30208ed6 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -22,8 +22,10 @@ package org.elasticsearch.common.xcontent.json; import com.fasterxml.jackson.core.JsonLocation; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; + import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; +import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.AbstractXContentParser; @@ -35,7 +37,8 @@ public class JsonXContentParser extends AbstractXContentParser { final JsonParser parser; - public JsonXContentParser(JsonParser parser) { + public JsonXContentParser(NamedXContentRegistry xContentRegistry, JsonParser parser) { + super(xContentRegistry); this.parser = parser; } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index 643326cd82f..b43a13a9193 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -26,6 +26,7 @@ import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -79,32 +80,32 @@ public class SmileXContent implements XContent { } @Override - public XContentParser createParser(String content) throws IOException { - return new SmileXContentParser(smileFactory.createParser(new FastStringReader(content))); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException { + return new SmileXContentParser(xContentRegistry, smileFactory.createParser(new FastStringReader(content))); } @Override - public XContentParser createParser(InputStream is) throws IOException { - return new SmileXContentParser(smileFactory.createParser(is)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException { + return new SmileXContentParser(xContentRegistry, smileFactory.createParser(is)); } @Override - public XContentParser createParser(byte[] data) throws IOException { - return new SmileXContentParser(smileFactory.createParser(data)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException { + return new SmileXContentParser(xContentRegistry, smileFactory.createParser(data)); } @Override - public XContentParser createParser(byte[] data, int offset, int length) throws IOException { - return new SmileXContentParser(smileFactory.createParser(data, offset, length)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException { + return new SmileXContentParser(xContentRegistry, smileFactory.createParser(data, offset, length)); } @Override - public XContentParser createParser(BytesReference bytes) throws IOException { - return createParser(bytes.streamInput()); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { + return createParser(xContentRegistry, bytes.streamInput()); } @Override - public XContentParser createParser(Reader reader) throws IOException { - return new SmileXContentParser(smileFactory.createParser(reader)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) 
throws IOException { + return new SmileXContentParser(xContentRegistry, smileFactory.createParser(reader)); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java index afa420805f7..f368c0e383f 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java @@ -20,20 +20,14 @@ package org.elasticsearch.common.xcontent.smile; import com.fasterxml.jackson.core.JsonGenerator; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentGenerator; import java.io.OutputStream; -import java.util.Collections; import java.util.Set; public class SmileXContentGenerator extends JsonXContentGenerator { - public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) { - this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet()); - } - public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set includes, Set excludes) { super(jsonGenerator, os, includes, excludes); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java index ad8e12e70bf..c7b4b8c000c 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java @@ -20,13 +20,15 @@ package org.elasticsearch.common.xcontent.smile; import com.fasterxml.jackson.core.JsonParser; + +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; public class SmileXContentParser extends JsonXContentParser { - public SmileXContentParser(JsonParser parser) { - super(parser); + public SmileXContentParser(NamedXContentRegistry xContentRegistry, JsonParser parser) { + super(xContentRegistry, parser); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index d13dcbd9c93..95fe08d96c3 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.xcontent.support; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -49,7 +50,11 @@ public abstract class AbstractXContentParser implements XContentParser { } } + private final NamedXContentRegistry xContentRegistry; + public AbstractXContentParser(NamedXContentRegistry xContentRegistry) { + this.xContentRegistry = xContentRegistry; + } // The 3rd party parsers we rely on are known to silently truncate fractions: see // http://fasterxml.github.io/jackson-core/javadoc/2.3.0/com/fasterxml/jackson/core/JsonParser.html#getShortValue() @@ -72,9 +77,6 @@ public abstract class AbstractXContentParser 
implements XContentParser { switch (currentToken()) { case VALUE_BOOLEAN: return true; - case VALUE_NUMBER: - NumberType numberType = numberType(); - return numberType == NumberType.LONG || numberType == NumberType.INT; case VALUE_STRING: return Booleans.isBoolean(textCharacters(), textOffset(), textLength()); default: @@ -84,11 +86,37 @@ public abstract class AbstractXContentParser implements XContentParser { @Override public boolean booleanValue() throws IOException { + Token token = currentToken(); + if (token == Token.VALUE_STRING) { + return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */); + } + return doBooleanValue(); + } + + @Override + @Deprecated + public boolean isBooleanValueLenient() throws IOException { + switch (currentToken()) { + case VALUE_BOOLEAN: + return true; + case VALUE_NUMBER: + NumberType numberType = numberType(); + return numberType == NumberType.LONG || numberType == NumberType.INT; + case VALUE_STRING: + return Booleans.isBooleanLenient(textCharacters(), textOffset(), textLength()); + default: + return false; + } + } + + @Override + @Deprecated + public boolean booleanValueLenient() throws IOException { Token token = currentToken(); if (token == Token.VALUE_NUMBER) { return intValue() != 0; } else if (token == Token.VALUE_STRING) { - return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */); + return Booleans.parseBooleanLenient(textCharacters(), textOffset(), textLength(), false /* irrelevant */); } return doBooleanValue(); } @@ -356,6 +384,16 @@ public abstract class AbstractXContentParser implements XContentParser { return null; } + @Override + public T namedObject(Class categoryClass, String name, Object context) throws IOException { + return xContentRegistry.parseNamedObject(categoryClass, name, this, context); + } + + @Override + public NamedXContentRegistry getXContentRegistry() { + return xContentRegistry; + } + @Override public abstract boolean isClosed(); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index a1affb4fe57..36eacb81f83 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; @@ -263,7 +264,7 @@ public class XContentMapValues { List filteredValue = filter((Iterable) value, subIncludeAutomaton, subIncludeState, excludeAutomaton, excludeState, matchAllAutomaton); - if (includeAutomaton.isAccept(includeState) || filteredValue.isEmpty() == false) { + if (filteredValue.isEmpty() == false) { filtered.put(key, filteredValue); } @@ -286,6 +287,7 @@ public class XContentMapValues { CharacterRunAutomaton excludeAutomaton, int initialExcludeState, CharacterRunAutomaton matchAllAutomaton) { List filtered = new ArrayList<>(); + boolean isInclude = includeAutomaton.isAccept(initialIncludeState); for (Object value : iterable) { if (value instanceof Map) { int includeState = 
includeAutomaton.step(initialIncludeState, '.'); @@ -304,9 +306,8 @@ public class XContentMapValues { if (filteredValue.isEmpty() == false) { filtered.add(filteredValue); } - } else { - // TODO: we have tests relying on this behavior on arrays even - // if the path does not match, but this looks like a bug? + } else if (isInclude) { + // #22557: only accept this array value if the key we are on is accepted: filtered.add(value); } } @@ -412,39 +413,29 @@ public class XContentMapValues { return Long.parseLong(node.toString()); } - /** - * This method is very lenient, use {@link #nodeBooleanValue} instead. - */ - public static boolean lenientNodeBooleanValue(Object node, boolean defaultValue) { - if (node == null) { - return defaultValue; + public static boolean nodeBooleanValue(Object node, String name, boolean defaultValue) { + try { + return nodeBooleanValue(node, defaultValue); + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException("Could not convert [" + name + "] to boolean", ex); } - return lenientNodeBooleanValue(node); } - /** - * This method is very lenient, use {@link #nodeBooleanValue} instead. - */ - public static boolean lenientNodeBooleanValue(Object node) { - if (node instanceof Boolean) { - return (Boolean) node; + public static boolean nodeBooleanValue(Object node, boolean defaultValue) { + String nodeValue = node == null ? null : node.toString(); + return Booleans.parseBoolean(nodeValue, defaultValue); + } + + public static boolean nodeBooleanValue(Object node, String name) { + try { + return nodeBooleanValue(node); + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException("Could not convert [" + name + "] to boolean", ex); } - if (node instanceof Number) { - return ((Number) node).intValue() != 0; - } - String value = node.toString(); - return !(value.equals("false") || value.equals("0") || value.equals("off")); } public static boolean nodeBooleanValue(Object node) { - switch (node.toString()) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("Can't parse boolean value [" + node + "], expected [true] or [false]"); - } + return Booleans.parseBoolean(node.toString()); } public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java index 7413f05f583..56dda843c45 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java @@ -25,6 +25,7 @@ import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -74,32 +75,32 @@ public class YamlXContent implements XContent { } @Override - public XContentParser createParser(String content) throws IOException { - return new YamlXContentParser(yamlFactory.createParser(new FastStringReader(content))); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, String content) throws IOException { + return new YamlXContentParser(xContentRegistry, 
yamlFactory.createParser(new FastStringReader(content))); } @Override - public XContentParser createParser(InputStream is) throws IOException { - return new YamlXContentParser(yamlFactory.createParser(is)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, InputStream is) throws IOException { + return new YamlXContentParser(xContentRegistry, yamlFactory.createParser(is)); } @Override - public XContentParser createParser(byte[] data) throws IOException { - return new YamlXContentParser(yamlFactory.createParser(data)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data) throws IOException { + return new YamlXContentParser(xContentRegistry, yamlFactory.createParser(data)); } @Override - public XContentParser createParser(byte[] data, int offset, int length) throws IOException { - return new YamlXContentParser(yamlFactory.createParser(data, offset, length)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, byte[] data, int offset, int length) throws IOException { + return new YamlXContentParser(xContentRegistry, yamlFactory.createParser(data, offset, length)); } @Override - public XContentParser createParser(BytesReference bytes) throws IOException { - return createParser(bytes.streamInput()); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException { + return createParser(xContentRegistry, bytes.streamInput()); } @Override - public XContentParser createParser(Reader reader) throws IOException { - return new YamlXContentParser(yamlFactory.createParser(reader)); + public XContentParser createParser(NamedXContentRegistry xContentRegistry, Reader reader) throws IOException { + return new YamlXContentParser(xContentRegistry, yamlFactory.createParser(reader)); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java index d2c53c8a020..0d969c21a0f 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java @@ -20,20 +20,14 @@ package org.elasticsearch.common.xcontent.yaml; import com.fasterxml.jackson.core.JsonGenerator; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentGenerator; import java.io.OutputStream; -import java.util.Collections; import java.util.Set; public class YamlXContentGenerator extends JsonXContentGenerator { - public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) { - this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet()); - } - public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set includes, Set excludes) { super(jsonGenerator, os, includes, excludes); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java index 5efceac7dcf..c2fdcfa740b 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java @@ -20,13 +20,15 @@ package org.elasticsearch.common.xcontent.yaml; import com.fasterxml.jackson.core.JsonParser; + +import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; public class YamlXContentParser extends JsonXContentParser { - public YamlXContentParser(JsonParser parser) { - super(parser); + public YamlXContentParser(NamedXContentRegistry xContentRegistry, JsonParser parser) { + super(xContentRegistry, parser); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 98ce54428c7..ea3ae0c919b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -30,6 +30,7 @@ import java.util.function.Supplier; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -54,8 +55,9 @@ public class DiscoveryModule { private final Discovery discovery; - public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NetworkService networkService, - ClusterService clusterService, List plugins) { + public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, ClusterService clusterService, + List plugins) { final UnicastHostsProvider hostsProvider; Map> hostProviders = new HashMap<>(); @@ -78,10 +80,12 @@ public class DiscoveryModule { } Map> discoveryTypes = new HashMap<>(); - discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider)); + discoveryTypes.put("zen", + () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider)); discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings())); for (DiscoveryPlugin plugin : plugins) { - plugin.getDiscoveryTypes(threadPool, transportService, clusterService, hostsProvider).entrySet().forEach(entry -> { + plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, + clusterService, hostsProvider).entrySet().forEach(entry -> { if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) { throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice"); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 7116597bdaf..f0de15da9ae 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -174,7 +173,7 @@ public class 
ElectMasterService extends AbstractComponent { * Returns the given nodes sorted by likelihood of being elected as master, most likely first. * Non-master nodes are not removed but are rather put at the end */ - public static List sortByMasterLikelihood(Iterable nodes) { + static List sortByMasterLikelihood(Iterable nodes) { ArrayList sortedNodes = CollectionUtils.iterableAsArrayList(nodes); CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes); return sortedNodes; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index c8527168198..c65542093d3 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -170,7 +170,7 @@ public class MembershipAction extends AbstractComponent { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - this.state = ClusterState.Builder.readFrom(in, localNode.get()); + this.state = ClusterState.readFrom(in, localNode.get()); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 74dbf835b58..2d84f5f863d 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -24,12 +24,11 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.NotMasterException; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -59,7 +58,6 @@ public class NodeJoinController extends AbstractComponent { private final ClusterService clusterService; private final AllocationService allocationService; private final ElectMasterService electMaster; - private final DiscoverySettings discoverySettings; private final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(); // this is set while trying to become a master @@ -68,12 +66,11 @@ public class NodeJoinController extends AbstractComponent { public NodeJoinController(ClusterService clusterService, AllocationService allocationService, ElectMasterService electMaster, - DiscoverySettings discoverySettings, Settings settings) { + Settings settings) { super(settings); this.clusterService = clusterService; this.allocationService = allocationService; this.electMaster = electMaster; - this.discoverySettings = discoverySettings; } /** @@ -408,8 +405,9 @@ public class NodeJoinController extends AbstractComponent { class JoinTaskExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List joiningNodes) throws Exception { - final BatchResult.Builder results = BatchResult.builder(); + public ClusterTasksResult execute(ClusterState currentState, List joiningNodes) throws
Exception { + final ClusterTasksResult.Builder results = ClusterTasksResult.builder(); + final DiscoveryNodes currentNodes = currentState.nodes(); boolean nodesChanged = false; ClusterState.Builder newState; @@ -471,8 +469,6 @@ public class NodeJoinController extends AbstractComponent { DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) - .removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); for (final DiscoveryNode joiningNode : joiningNodes) { final DiscoveryNode nodeWithSameId = nodesBuilder.get(joiningNode.getId()); if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { @@ -490,7 +486,7 @@ public class NodeJoinController extends AbstractComponent { // now trim any left over dead nodes - either left there when the previous master stepped down // or removed by us above - ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build(); + ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 9f0d3576c4b..4150783a8fd 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -34,6 +35,8 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -79,6 +82,7 @@ public class PublishClusterStateAction extends AbstractComponent { } private final TransportService transportService; + private final NamedWriteableRegistry namedWriteableRegistry; private final Supplier clusterStateSupplier; private final NewPendingClusterStateListener newPendingClusterStatelistener; private final DiscoverySettings discoverySettings; @@ -88,12 +92,14 @@ public class PublishClusterStateAction extends AbstractComponent { public PublishClusterStateAction( Settings settings, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, Supplier clusterStateSupplier, NewPendingClusterStateListener listener, DiscoverySettings discoverySettings, ClusterName clusterName) { super(settings); this.transportService = transportService; + this.namedWriteableRegistry = namedWriteableRegistry; this.clusterStateSupplier = clusterStateSupplier; this.newPendingClusterStatelistener = 
listener; this.discoverySettings = discoverySettings; @@ -370,33 +376,37 @@ public class PublishClusterStateAction extends AbstractComponent { protected void handleIncomingClusterStateRequest(BytesTransportRequest request, TransportChannel channel) throws IOException { Compressor compressor = CompressorFactory.compressor(request.bytes()); - StreamInput in; - if (compressor != null) { - in = compressor.streamInput(request.bytes().streamInput()); - } else { - in = request.bytes().streamInput(); - } - in.setVersion(request.version()); - synchronized (lastSeenClusterStateMutex) { - final ClusterState incomingState; - // If true we received full cluster state - otherwise diffs - if (in.readBoolean()) { - incomingState = ClusterState.Builder.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode()); - logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length()); - } else if (lastSeenClusterState != null) { - Diff diff = lastSeenClusterState.readDiffFrom(in); - incomingState = diff.apply(lastSeenClusterState); - logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", - incomingState.version(), incomingState.stateUUID(), request.bytes().length()); - } else { - logger.debug("received diff for but don't have any local cluster state - requesting full state"); - throw new IncompatibleClusterStateVersionException("have no local cluster state"); + StreamInput in = request.bytes().streamInput(); + try { + if (compressor != null) { + in = compressor.streamInput(in); } - // sanity check incoming state - validateIncomingState(incomingState, lastSeenClusterState); + in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); + in.setVersion(request.version()); + synchronized (lastSeenClusterStateMutex) { + final ClusterState incomingState; + // If true we received full cluster state - otherwise diffs + if (in.readBoolean()) { + incomingState = ClusterState.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode()); + logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), + request.bytes().length()); + } else if (lastSeenClusterState != null) { + Diff diff = ClusterState.readDiffFrom(in, lastSeenClusterState.nodes().getLocalNode()); + incomingState = diff.apply(lastSeenClusterState); + logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", + incomingState.version(), incomingState.stateUUID(), request.bytes().length()); + } else { + logger.debug("received diff for but don't have any local cluster state - requesting full state"); + throw new IncompatibleClusterStateVersionException("have no local cluster state"); + } + // sanity check incoming state + validateIncomingState(incomingState, lastSeenClusterState); - pendingStatesQueue.addPending(incomingState); - lastSeenClusterState = incomingState; + pendingStatesQueue.addPending(incomingState); + lastSeenClusterState = incomingState; + } + } finally { + IOUtils.close(in); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 4a6006c1a04..6658913cce6 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -23,13 +23,12 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ElasticsearchException; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,10 +43,14 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.NodeNotConnectedException; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.Transport.Connection; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -60,8 +63,8 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -70,18 +73,17 @@ import java.util.Objects; import java.util.Queue; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -116,22 +118,19 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { private volatile PingContextProvider contextProvider; - private final AtomicInteger pingHandlerIdGenerator = new AtomicInteger(); + private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger(); - // used to generate unique ids for nodes/address we temporarily connect to - private final AtomicInteger unicastNodeIdGenerator = new AtomicInteger(); - - // used as a node id prefix for nodes/address we temporarily connect to + // used as a node id prefix for configured unicast host nodes/address private static final String UNICAST_NODE_PREFIX = 
"#zen_unicast_"; - private final Map receivedResponses = newConcurrentMap(); + private final Map activePingingRounds = newConcurrentMap(); // a list of temporal responses a node will return for a request (holds responses from other nodes) private final Queue temporalResponses = ConcurrentCollections.newQueue(); private final UnicastHostsProvider hostsProvider; - private final ExecutorService unicastZenPingExecutorService; + protected final EsThreadPoolExecutor unicastZenPingExecutorService; private final TimeValue resolveTimeout; @@ -146,15 +145,14 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { this.hostsProvider = unicastHostsProvider; this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - final List hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); - if (hosts.isEmpty()) { + if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { + configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); + // we only limit to 1 addresses, makes no sense to ping 100 ports + limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; + } else { // if unicast hosts are not specified, fill with simple defaults on the local machine configuredHosts = transportService.getLocalAddresses(); limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; - } else { - configuredHosts = hosts; - // we only limit to 1 addresses, makes no sense to ping 100 ports - limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; } resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); logger.debug( @@ -164,7 +162,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { resolveTimeout); transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest::new, ThreadPool.Names.SAME, - new UnicastPingRequestHandler()); + new UnicastPingRequestHandler()); final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]"); unicastZenPingExecutorService = EsExecutors.newScaling( @@ -186,23 +184,23 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { * @param hosts the hosts to resolve * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport) * @param transportService the transport service - * @param idGenerator the generator to supply unique ids for each discovery node + * @param nodeId_prefix a prefix to use for node ids * @param resolveTimeout the timeout before returning from hostname lookups * @return a list of discovery nodes with resolved transport addresses */ - public static List resolveDiscoveryNodes( + public static List resolveHostsLists( final ExecutorService executorService, final Logger logger, final List hosts, final int limitPortCounts, final TransportService transportService, - final Supplier idGenerator, + final String nodeId_prefix, final TimeValue resolveTimeout) throws InterruptedException { Objects.requireNonNull(executorService); Objects.requireNonNull(logger); Objects.requireNonNull(hosts); Objects.requireNonNull(transportService); - Objects.requireNonNull(idGenerator); + Objects.requireNonNull(nodeId_prefix); Objects.requireNonNull(resolveTimeout); if (resolveTimeout.nanos() < 0) { throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]"); @@ -211,7 +209,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { final List> callables = hosts .stream() - .map(hn -> (Callable)() -> transportService.addressesFromString(hn, 
limitPortCounts)) + .map(hn -> (Callable) () -> transportService.addressesFromString(hn, limitPortCounts)) .collect(Collectors.toList()); final List> futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); @@ -226,11 +224,11 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { try { final TransportAddress[] addresses = future.get(); logger.trace("resolved host [{}] to {}", hostname, addresses); - for (final TransportAddress address : addresses) { + for (int addressId = 0; addressId < addresses.length; addressId++) { discoveryNodes.add( new DiscoveryNode( - idGenerator.get(), - address, + nodeId_prefix + hostname + "_" + addressId + "#", + addresses[addressId], emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); @@ -249,8 +247,8 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { @Override public void close() { - ThreadPool.terminate(unicastZenPingExecutorService, 0, TimeUnit.SECONDS); - Releasables.close(receivedResponses.values()); + ThreadPool.terminate(unicastZenPingExecutorService, 10, TimeUnit.SECONDS); + Releasables.close(activePingingRounds.values()); closed = true; } @@ -266,106 +264,106 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { temporalResponses.clear(); } - // test only - Collection pingAndWait(TimeValue duration) { - final AtomicReference> response = new AtomicReference<>(); - final CountDownLatch latch = new CountDownLatch(1); - ping(pings -> { - response.set(pings); - latch.countDown(); - }, duration); - try { - latch.await(); - return response.get(); - } catch (InterruptedException e) { - return null; - } - } - /** - * Sends three rounds of pings notifying the specified {@link PingListener} when pinging is complete. Pings are sent after resolving + * Sends three rounds of pings notifying the specified {@link Consumer} when pinging is complete. Pings are sent after resolving * configured unicast hosts to their IP address (subject to DNS caching within the JVM). A batch of pings is sent, then another batch * of pings is sent at half the specified {@link TimeValue}, and then another batch of pings is sent at the specified {@link TimeValue}. * The pings that are sent carry a timeout of 1.25 times the specified {@link TimeValue}. When pinging each node, a connection and * handshake is performed, with a connection timeout of the specified {@link TimeValue}. * - * @param listener the callback when pinging is complete - * @param duration the timeout for various components of the pings + * @param resultsConsumer the callback when pinging is complete + * @param duration the timeout for various components of the pings */ @Override - public void ping(final PingListener listener, final TimeValue duration) { - final List resolvedDiscoveryNodes; + public void ping(final Consumer resultsConsumer, final TimeValue duration) { + ping(resultsConsumer, duration, duration); + } + + /** + * A variant of {@link #ping(Consumer, TimeValue)} that allows separating the scheduling duration + * from the duration used for request-level timeouts. This is useful for testing.
This is useful for testing + */ + protected void ping(final Consumer resultsConsumer, + final TimeValue scheduleDuration, + final TimeValue requestDuration) { + final List seedNodes; try { - resolvedDiscoveryNodes = resolveDiscoveryNodes( + seedNodes = resolveHostsLists( unicastZenPingExecutorService, logger, configuredHosts, limitPortCounts, transportService, - () -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", + UNICAST_NODE_PREFIX, resolveTimeout); } catch (InterruptedException e) { throw new RuntimeException(e); } - final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet()); - try { - receivedResponses.put(sendPingsHandler.id(), sendPingsHandler); - try { - sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes); - } catch (RejectedExecutionException e) { - logger.debug("Ping execution rejected", e); - // The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings - // But don't bail here, we can retry later on after the send ping has been scheduled. + seedNodes.addAll(hostsProvider.buildDynamicNodes()); + final DiscoveryNodes nodes = contextProvider.nodes(); + // add all possible master nodes that were active in the last known cluster configuration + for (ObjectCursor masterNode : nodes.getMasterNodes().values()) { + seedNodes.add(masterNode.value); + } + + final ConnectionProfile connectionProfile = + ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration); + final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedNodes, resultsConsumer, + nodes.getLocalNode(), connectionProfile); + activePingingRounds.put(pingingRound.id(), pingingRound); + final AbstractRunnable pingSender = new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + if (e instanceof AlreadyClosedException == false) { + logger.warn("unexpected error while pinging", e); + } } - threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - protected void doRun() { - sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes); - threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler, resolvedDiscoveryNodes); - sendPingsHandler.close(); - listener.onPing(sendPingsHandler.pingCollection().toList()); - for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { - logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); - transportService.disconnectFromNode(node); - } - } + @Override + protected void doRun() throws Exception { + sendPings(requestDuration, pingingRound); + } + }; + threadPool.generic().execute(pingSender); + threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3), ThreadPool.Names.GENERIC, pingSender); + threadPool.schedule(TimeValue.timeValueMillis(scheduleDuration.millis() / 3 * 2), ThreadPool.Names.GENERIC, pingSender); + threadPool.schedule(scheduleDuration, ThreadPool.Names.GENERIC, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + finishPingingRound(pingingRound); + } - @Override - public void onFailure(Exception e) { - logger.debug("Ping execution failed", e); - sendPingsHandler.close(); - } 
- }); - } - - @Override - public void onFailure(Exception e) { - logger.debug("Ping execution failed", e); - sendPingsHandler.close(); - } - }); - } catch (EsRejectedExecutionException ex) { // TODO: remove this once ScheduledExecutor has support for AbstractRunnable - sendPingsHandler.close(); - // we are shutting down - } catch (Exception e) { - sendPingsHandler.close(); - throw new ElasticsearchException("Ping execution failed", e); - } + @Override + public void onFailure(Exception e) { + logger.warn("unexpected error while finishing pinging round", e); + } + }); } - class SendPingsHandler implements Releasable { + // for testing + protected void finishPingingRound(PingingRound pingingRound) { + pingingRound.close(); + } + + protected class PingingRound implements Releasable { private final int id; - private final Set nodeToDisconnect = ConcurrentCollections.newConcurrentSet(); + private final Map tempConnections = new HashMap<>(); + private final KeyedLock connectionLock = new KeyedLock<>(true); private final PingCollection pingCollection; + private final List seedNodes; + private final Consumer pingListener; + private final DiscoveryNode localNode; + private final ConnectionProfile connectionProfile; private AtomicBoolean closed = new AtomicBoolean(false); - SendPingsHandler(int id) { + PingingRound(int id, List seedNodes, Consumer resultsConsumer, DiscoveryNode localNode, + ConnectionProfile connectionProfile) { this.id = id; + this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + this.pingListener = resultsConsumer; + this.localNode = localNode; + this.connectionProfile = connectionProfile; this.pingCollection = new PingCollection(); } @@ -377,154 +375,174 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { return this.closed.get(); } - public PingCollection pingCollection() { - return pingCollection; + public List getSeedNodes() { + ensureOpen(); + return seedNodes; + } + + public Connection getOrConnect(DiscoveryNode node) throws IOException { + Connection result; + try (Releasable ignore = connectionLock.acquire(node.getAddress())) { + result = tempConnections.get(node.getAddress()); + if (result == null) { + ensureOpen(); + boolean success = false; + logger.trace("[{}] opening connection to [{}]", id(), node); + result = transportService.openConnection(node, connectionProfile); + try { + transportService.handshake(result, connectionProfile.getHandshakeTimeout().millis()); + synchronized (this) { + // acquire lock and check if closed, to prevent leaving an open connection after closing + ensureOpen(); + Connection existing = tempConnections.put(node.getAddress(), result); + assert existing == null; + success = true; + } + } finally { + if (success == false) { + logger.trace("[{}] closing connection to [{}] due to failure", id(), node); + IOUtils.closeWhileHandlingException(result); + } + } + } + } + return result; + } + + private void ensureOpen() { + if (isClosed()) { + throw new AlreadyClosedException("pinging round [" + id + "] is finished"); + } + } + + public void addPingResponseToCollection(PingResponse pingResponse) { + if (localNode.equals(pingResponse.node()) == false) { + pingCollection.addPing(pingResponse); + } } @Override public void close() { - if (closed.compareAndSet(false, true)) { - receivedResponses.remove(id); + List toClose = null; + synchronized (this) { + if (closed.compareAndSet(false, true)) { + activePingingRounds.remove(id); + toClose = new ArrayList<>(tempConnections.values()); + tempConnections.clear(); 
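getOrConnect and close above guard against a subtle race: a handshake that completes while the round is being closed must not leak its connection. A compressed sketch of the idea with toy types (the real code additionally serializes connection attempts per address with a KeyedLock; RoundSketch and Opener are illustrative names, not the PR's API):

-------------------------------------------------
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class RoundSketch {
    interface Opener { Closeable open(String address) throws IOException; }

    private final Map<String, Closeable> tempConnections = new HashMap<>();
    private boolean closed = false;

    Closeable getOrConnect(String address, Opener opener) throws IOException {
        synchronized (this) {
            Closeable existing = tempConnections.get(address);
            if (existing != null) {
                return existing;
            }
            ensureOpen();
        }
        Closeable fresh = opener.open(address); // slow I/O happens outside the mutex
        synchronized (this) {
            if (closed) {
                fresh.close(); // lost the race with close(): do not leak the socket
                throw new IllegalStateException("round is finished");
            }
            Closeable prior = tempConnections.putIfAbsent(address, fresh);
            if (prior != null) {
                fresh.close(); // another caller connected to this address first
                return prior;
            }
            return fresh;
        }
    }

    void close() throws IOException {
        List<Closeable> toClose = null;
        synchronized (this) {
            if (closed == false) {
                closed = true;
                toClose = new ArrayList<>(tempConnections.values());
                tempConnections.clear();
            }
        }
        if (toClose != null) { // only the first close() releases the connections
            for (Closeable connection : toClose) {
                connection.close();
            }
        }
    }

    private void ensureOpen() {
        if (closed) {
            throw new IllegalStateException("round is finished");
        }
    }
}
-------------------------------------------------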
+ } } + if (toClose != null) { + // we actually closed + try { + pingListener.accept(pingCollection); + } finally { + IOUtils.closeWhileHandlingException(toClose); + } + } + } + + public ConnectionProfile getConnectionProfile() { + return connectionProfile; } } - void sendPings( - final TimeValue timeout, - @Nullable TimeValue waitTime, - final SendPingsHandler sendPingsHandler, - final List resolvedDiscoveryNodes) { + protected void sendPings(final TimeValue timeout, final PingingRound pingingRound) { final UnicastPingRequest pingRequest = new UnicastPingRequest(); - pingRequest.id = sendPingsHandler.id(); + pingRequest.id = pingingRound.id(); pingRequest.timeout = timeout; DiscoveryNodes discoNodes = contextProvider.nodes(); pingRequest.pingResponse = createPingResponse(discoNodes); - HashSet nodesToPingSet = new HashSet<>(); - for (PingResponse temporalResponse : temporalResponses) { - // Only send pings to nodes that have the same cluster name. - if (clusterName.equals(temporalResponse.clusterName())) { - nodesToPingSet.add(temporalResponse.node()); - } - } - nodesToPingSet.addAll(hostsProvider.buildDynamicNodes()); + Set nodesFromResponses = temporalResponses.stream().map(pingResponse -> { + assert clusterName.equals(pingResponse.clusterName()) : + "got a ping request from a different cluster. expected " + clusterName + " got " + pingResponse.clusterName(); + return pingResponse.node(); + }).collect(Collectors.toSet()); - // add all possible master nodes that were active in the last known cluster configuration - for (ObjectCursor masterNode : discoNodes.getMasterNodes().values()) { - nodesToPingSet.add(masterNode.value); - } + // dedup by address + final Map uniqueNodesByAddress = + Stream.concat(pingingRound.getSeedNodes().stream(), nodesFromResponses.stream()) + .collect(Collectors.toMap(DiscoveryNode::getAddress, Function.identity(), (n1, n2) -> n1)); - // sort the nodes by likelihood of being an active master - List sortedNodesToPing = ElectMasterService.sortByMasterLikelihood(nodesToPingSet); - // add the configured hosts first - final List nodesToPing = new ArrayList<>(resolvedDiscoveryNodes.size() + sortedNodesToPing.size()); - nodesToPing.addAll(resolvedDiscoveryNodes); - nodesToPing.addAll(sortedNodesToPing); - - final CountDownLatch latch = new CountDownLatch(nodesToPing.size()); - for (final DiscoveryNode node : nodesToPing) { - // make sure we are connected - final boolean nodeFoundByAddress; - DiscoveryNode nodeToSend = discoNodes.findByAddress(node.getAddress()); - if (nodeToSend != null) { - nodeFoundByAddress = true; - } else { - nodeToSend = node; - nodeFoundByAddress = false; - } - - if (!transportService.nodeConnected(nodeToSend)) { - if (sendPingsHandler.isClosed()) { - return; + // resolve what we can via the latest cluster state + final Set nodesToPing = uniqueNodesByAddress.values().stream() + .map(node -> { + DiscoveryNode foundNode = discoNodes.findByAddress(node.getAddress()); + if (foundNode == null) { + return node; + } else { + return foundNode; } - // if we find on the disco nodes a matching node by address, we are going to restore the connection - // anyhow down the line if its not connected... - // if we can't resolve the node, we don't know and we have to clean up after pinging. We do have - // to make sure we don't disconnect a true node which was temporarily removed from the DiscoveryNodes - // but will be added again during the pinging. 
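The dedup step above relies on the three-argument Collectors.toMap: when a configured seed node and a fresh ping response carry the same transport address, the merge function (n1, n2) -> n1 keeps the first one seen. A runnable sketch of just that step, with a simplified Node class standing in for DiscoveryNode:

-------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DedupByAddress {
    static final class Node {
        final String id;
        final String address;
        Node(String id, String address) { this.id = id; this.address = address; }
        String getAddress() { return address; }
        @Override public String toString() { return id + "@" + address; }
    }

    public static void main(String[] args) {
        List<Node> seeds = Arrays.asList(new Node("#zen_unicast_host1_0#", "10.0.0.1:9300"));
        List<Node> fromResponses = Arrays.asList(
            new Node("node-uuid-1", "10.0.0.1:9300"),  // same address as a seed: dropped
            new Node("node-uuid-2", "10.0.0.2:9300")); // new address: kept

        Map<String, Node> unique = Stream.concat(seeds.stream(), fromResponses.stream())
            .collect(Collectors.toMap(Node::getAddress, Function.identity(), (n1, n2) -> n1));

        unique.values().forEach(System.out::println);
    }
}
-------------------------------------------------

The findByAddress lookup that follows then swaps any surviving placeholder for the real DiscoveryNode from the current cluster state when one is known.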
We therefore create a new temporary node - if (!nodeFoundByAddress) { - if (!nodeToSend.getId().startsWith(UNICAST_NODE_PREFIX)) { - DiscoveryNode tempNode = new DiscoveryNode("", - UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "_" + nodeToSend.getId() + "#", - UUIDs.randomBase64UUID(), nodeToSend.getHostName(), nodeToSend.getHostAddress(), nodeToSend.getAddress(), - nodeToSend.getAttributes(), nodeToSend.getRoles(), nodeToSend.getVersion()); + }).collect(Collectors.toSet()); - logger.trace("replacing {} with temp node {}", nodeToSend, tempNode); - nodeToSend = tempNode; - } - sendPingsHandler.nodeToDisconnect.add(nodeToSend); - } - // fork the connection to another thread - final DiscoveryNode finalNodeToSend = nodeToSend; - unicastZenPingExecutorService.execute(new Runnable() { - @Override - public void run() { - if (sendPingsHandler.isClosed()) { - return; - } - boolean success = false; - try { - // connect to the node, see if we manage to do it, if not, bail - if (!nodeFoundByAddress) { - logger.trace("[{}] connecting (light) to {}", sendPingsHandler.id(), finalNodeToSend); - transportService.connectToNodeAndHandshake(finalNodeToSend, timeout.getMillis()); - } else { - logger.trace("[{}] connecting to {}", sendPingsHandler.id(), finalNodeToSend); - transportService.connectToNode(finalNodeToSend); - } - logger.trace("[{}] connected to {}", sendPingsHandler.id(), node); - if (receivedResponses.containsKey(sendPingsHandler.id())) { - // we are connected and still in progress, send the ping request - sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, finalNodeToSend); - } else { - // connect took too long, just log it and bail - latch.countDown(); - logger.trace("[{}] connect to {} was too long outside of ping window, bailing", - sendPingsHandler.id(), node); - } - success = true; - } catch (ConnectTransportException e) { - // can't connect to the node - this is a more common path! 
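// Aside: a self-contained illustration of the dedup-by-address collector introduced in
// sendPings above -- Collectors.toMap with a merge function that keeps the first
// occurrence, so seed nodes win over duplicates resolved from ping responses because
// the seed stream is concatenated first. The Endpoint type below is a hypothetical
// stand-in for DiscoveryNode.
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DedupByAddressExample {
    static final class Endpoint {
        final String address;
        final String name;
        Endpoint(String address, String name) { this.address = address; this.name = name; }
        String getAddress() { return address; }
        @Override public String toString() { return name + "@" + address; }
    }

    public static void main(String[] args) {
        List<Endpoint> seeds = Arrays.asList(new Endpoint("10.0.0.1:9300", "seed-1"));
        List<Endpoint> fromResponses = Arrays.asList(
                new Endpoint("10.0.0.1:9300", "resolved-1"), // duplicate address
                new Endpoint("10.0.0.2:9300", "resolved-2"));

        // The merge function (a, b) -> a keeps the first entry per key,
        // mirroring (n1, n2) -> n1 in the patch above.
        Map<String, Endpoint> uniqueByAddress =
                Stream.concat(seeds.stream(), fromResponses.stream())
                        .collect(Collectors.toMap(Endpoint::getAddress, Function.identity(), (a, b) -> a));

        // e.g. [seed-1@10.0.0.1:9300, resolved-2@10.0.0.2:9300] (iteration order not guaranteed)
        System.out.println(uniqueByAddress.values());
    }
}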
- logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed to connect to {}", sendPingsHandler.id(), finalNodeToSend), e); - } catch (RemoteTransportException e) { - // something went wrong on the other side - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "[{}] received a remote error as a response to ping {}", sendPingsHandler.id(), finalNodeToSend), e); - } catch (Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed send ping to {}", sendPingsHandler.id(), finalNodeToSend), e); - } finally { - if (!success) { - latch.countDown(); - } - } - } - }); - } else { - sendPingRequestToNode(sendPingsHandler.id(), timeout, pingRequest, latch, node, nodeToSend); - } - } - if (waitTime != null) { - try { - latch.await(waitTime.millis(), TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - // ignore - } - } + nodesToPing.forEach(node -> sendPingRequestToNode(node, timeout, pingingRound, pingRequest)); } - private void sendPingRequestToNode(final int id, final TimeValue timeout, final UnicastPingRequest pingRequest, - final CountDownLatch latch, final DiscoveryNode node, final DiscoveryNode nodeToSend) { - logger.trace("[{}] sending to {}", id, nodeToSend); - transportService.sendRequest(nodeToSend, ACTION_NAME, pingRequest, TransportRequestOptions.builder() - .withTimeout((long) (timeout.millis() * 1.25)).build(), new TransportResponseHandler() { + private void sendPingRequestToNode(final DiscoveryNode node, TimeValue timeout, final PingingRound pingingRound, + final UnicastPingRequest pingRequest) { + submitToExecutor(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + Connection connection = null; + if (transportService.nodeConnected(node)) { + try { + // concurrency can still cause disconnects + connection = transportService.getConnection(node); + } catch (NodeNotConnectedException e) { + logger.trace("[{}] node [{}] just disconnected, will create a temp connection", pingingRound.id(), node); + } + } + + if (connection == null) { + connection = pingingRound.getOrConnect(node); + } + + logger.trace("[{}] sending to {}", pingingRound.id(), node); + transportService.sendRequest(connection, ACTION_NAME, pingRequest, + TransportRequestOptions.builder().withTimeout((long) (timeout.millis() * 1.25)).build(), + getPingResponseHandler(pingingRound, node)); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) { + // can't connect to the node - this is a more common path! + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to ping {}", pingingRound.id(), node), e); + } else if (e instanceof RemoteTransportException) { + // something went wrong on the other side + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "[{}] received a remote error as a response to ping {}", pingingRound.id(), node), e); + } else { + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "[{}] failed to send ping to {}", pingingRound.id(), node), e); + } + } + + @Override + public void onRejection(Exception e) { + // The RejectedExecutionException can come from the fact that unicastZenPingExecutorService is at its max, down in sendPings. + // But don't bail here; we can retry later on, after the next send ping has been scheduled.
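// Aside: a self-contained sketch (all names hypothetical) of the onRejection/onFailure
// split described in the comment above -- with a bounded executor queue,
// RejectedExecutionException is an expected overload signal and is handled separately
// from real task failures. The patch's onRejection body continues below.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectionAwareSubmitExample {
    // Mirrors AbstractRunnable's split in spirit: doRun failures vs executor rejection.
    abstract static class RejectionAwareRunnable implements Runnable {
        abstract void doRun() throws Exception;
        abstract void onFailure(Exception e);
        void onRejection(RejectedExecutionException e) { onFailure(e); }
        @Override public final void run() {
            try {
                doRun();
            } catch (Exception e) {
                onFailure(e);
            }
        }
    }

    static void submit(ThreadPoolExecutor executor, RejectionAwareRunnable task) {
        try {
            executor.execute(task);
        } catch (RejectedExecutionException e) {
            task.onRejection(e); // saturated queue: expected, not fatal
        }
    }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                1, 1, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(1));
        for (int i = 0; i < 4; i++) {
            final int id = i;
            submit(executor, new RejectionAwareRunnable() {
                @Override void doRun() throws Exception {
                    Thread.sleep(100); // keep the pool busy to force rejections
                    System.out.println("ran " + id);
                }
                @Override void onFailure(Exception e) {
                    System.out.println("failed " + id + ": " + e);
                }
                @Override void onRejection(RejectedExecutionException e) {
                    System.out.println("rejected " + id + " (can be retried in a later round)");
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}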
+ logger.debug("Ping execution rejected", e); + } + }); + } + + // for testing + protected void submitToExecutor(AbstractRunnable abstractRunnable) { + unicastZenPingExecutorService.execute(abstractRunnable); + } + + // for testing + protected TransportResponseHandler getPingResponseHandler(final PingingRound pingingRound, + final DiscoveryNode node) { + return new TransportResponseHandler() { @Override public UnicastPingResponse newInstance() { @@ -538,50 +556,36 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { @Override public void handleResponse(UnicastPingResponse response) { - logger.trace("[{}] received response from {}: {}", id, nodeToSend, Arrays.toString(response.pingResponses)); - try { - DiscoveryNodes discoveryNodes = contextProvider.nodes(); - for (PingResponse pingResponse : response.pingResponses) { - if (pingResponse.node().equals(discoveryNodes.getLocalNode())) { - // that's us, ignore - continue; - } - SendPingsHandler sendPingsHandler = receivedResponses.get(response.id); - if (sendPingsHandler == null) { - if (!closed) { - // Only log when we're not closing the node. Having no send ping handler is then expected - logger.warn("received ping response {} with no matching handler id [{}]", pingResponse, response.id); - } - } else { - sendPingsHandler.pingCollection().addPing(pingResponse); - } + logger.trace("[{}] received response from {}: {}", pingingRound.id(), node, Arrays.toString(response.pingResponses)); + if (pingingRound.isClosed()) { + if (logger.isTraceEnabled()) { + logger.trace("[{}] skipping received response from {}. already closed", pingingRound.id(), node); } - } finally { - latch.countDown(); + } else { + Stream.of(response.pingResponses).forEach(pingingRound::addPingResponseToCollection); } } @Override public void handleException(TransportException exp) { - latch.countDown(); - if (exp instanceof ConnectTransportException) { + if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) { // ok, not connected... - logger.trace((Supplier) () -> new ParameterizedMessage("failed to connect to {}", nodeToSend), exp); - } else { + logger.trace((Supplier) () -> new ParameterizedMessage("failed to connect to {}", node), exp); + } else if (closed == false) { logger.warn((Supplier) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); } } - }); + }; } private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) { + assert clusterName.equals(request.pingResponse.clusterName()) : + "got a ping request from a different cluster. 
expected " + clusterName + " got " + request.pingResponse.clusterName(); temporalResponses.add(request.pingResponse); - threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() { - @Override - public void run() { - temporalResponses.remove(request.pingResponse); - } - }); + // add to any ongoing pinging + activePingingRounds.values().forEach(p -> p.addPingResponseToCollection(request.pingResponse)); + threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, + () -> temporalResponses.remove(request.pingResponse)); List pingResponses = CollectionUtils.iterableAsArrayList(temporalResponses); pingResponses.add(createPingResponse(contextProvider.nodes())); @@ -601,11 +605,11 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { channel.sendResponse(handlePingRequest(request)); } else { throw new IllegalStateException( - String.format( - Locale.ROOT, - "mismatched cluster names; request: [%s], local: [%s]", - request.pingResponse.clusterName().value(), - clusterName.value())); + String.format( + Locale.ROOT, + "mismatched cluster names; request: [%s], local: [%s]", + request.pingResponse.clusterName().value(), + clusterName.value())); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 16ca16d4a90..effc92a0c67 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -27,13 +27,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.NotMasterException; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -44,6 +43,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasables; @@ -67,11 +67,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; -import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -103,6 +103,7 @@ public class ZenDiscovery extends 
AbstractLifecycleComponent implements Discover public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; private final TransportService transportService; + private final NamedWriteableRegistry namedWriteableRegistry; private final ClusterService clusterService; private AllocationService allocationService; private final ClusterName clusterName; @@ -139,11 +140,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) { super(settings); this.clusterService = clusterService; this.clusterName = clusterService.getClusterName(); this.transportService = transportService; + this.namedWriteableRegistry = namedWriteableRegistry; this.discoverySettings = new DiscoverySettings(settings, clusterService.getClusterSettings()); this.zenPing = newZenPing(settings, threadPool, transportService, hostsProvider); this.electMaster = new ElectMasterService(settings); @@ -180,6 +183,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover new PublishClusterStateAction( settings, transportService, + namedWriteableRegistry, clusterService::state, new NewPendingClusterStateListener(), discoverySettings, @@ -207,25 +211,20 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); zenPing.start(this); - this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings); + this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, settings); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger); } @Override public void startInitialJoin() { // start the join thread from a cluster state update. See {@link JoinThreadControl} for details. 
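// Aside: a schematic, runnable model (all types hypothetical, not the ES API) of the
// LocalClusterUpdateTask pattern these ZenDiscovery changes switch to -- a local-only
// task returns a small result object, either "unchanged" or a wrapped replacement
// state, instead of echoing currentState back.
final class LocalTaskModel {
    static final class State {
        final long version;
        State(long version) { this.version = version; }
    }

    // Mirrors ClusterTasksResult in spirit: a null newState means "unchanged".
    static final class Result {
        final State newState;
        private Result(State newState) { this.newState = newState; }
        static Result unchanged() { return new Result(null); }
        static Result newState(State state) { return new Result(state); }
    }

    interface LocalUpdateTask {
        Result execute(State currentState) throws Exception;
    }

    static State apply(State current, LocalUpdateTask task) throws Exception {
        Result result = task.execute(current);
        return result.newState == null ? current : result.newState;
    }

    public static void main(String[] args) throws Exception {
        State current = new State(7);
        // A task that only has side effects reports unchanged() ...
        State after = apply(current, state -> Result.unchanged());
        System.out.println(after == current); // true: nothing replaced
        // ... while a task that produces a new state wraps it in newState().
        State bumped = apply(current, state -> Result.newState(new State(state.version + 1)));
        System.out.println(bumped.version); // 8
    }
}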
- clusterService.submitStateUpdateTask("initial_join", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("initial_join", new LocalClusterUpdateTask() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { // do the join on a different thread, the DiscoveryService waits for 30s anyhow till it is discovered joinThreadControl.startNewThreadIfNotRunning(); - return currentState; + return unchanged(); } @Override @@ -352,7 +351,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return joinThreadControl.joinThreadActive(); } - // used for testing public ClusterState[] pendingClusterStates() { return publishClusterState.pendingStatesQueue().pendingClusterStates(); @@ -408,18 +406,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // finalize join through the cluster state update thread final DiscoveryNode finalMasterNode = masterNode; - clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new LocalClusterUpdateTask() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { if (!success) { // failed to join. Try again... joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - return currentState; + return unchanged(); } if (currentState.getNodes().getMasterNode() == null) { @@ -427,7 +420,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // a valid master. logger.debug("no master node is set, despite of join request completing. retrying pings."); joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - return currentState; + return unchanged(); } if (!currentState.getNodes().getMasterNode().equals(finalMasterNode)) { @@ -437,7 +430,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // Note: we do not have to start master fault detection here because it's set at {@link #processNextPendingClusterState } // when the first cluster state arrives. 
joinThreadControl.markThreadAsDone(currentThread); - return currentState; + return unchanged(); } @Override @@ -496,9 +489,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } private void submitRejoin(String source) { - clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask(source, new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { return rejoin(currentState, source); } @@ -523,7 +516,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final DiscoveryNode node; private final String reason; - public Task(final DiscoveryNode node, final String reason) { + Task(final DiscoveryNode node, final String reason) { this.node = node; this.reason = reason; } @@ -554,7 +547,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } @Override - public BatchResult execute(final ClusterState currentState, final List tasks) throws Exception { + public ClusterTasksResult execute(final ClusterState currentState, final List tasks) throws Exception { final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes()); boolean removed = false; for (final Task task : tasks) { @@ -568,12 +561,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (!removed) { // no nodes to remove, keep the current cluster state - return BatchResult.builder().successes(tasks).build(currentState); + return ClusterTasksResult.builder().successes(tasks).build(currentState); } final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder); - final BatchResult.Builder resultBuilder = BatchResult.builder().successes(tasks); + final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) { rejoin.accept("not enough master nodes"); return resultBuilder.build(currentState); @@ -645,14 +638,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership. 
return; } - clusterService.submitStateUpdateTask("zen-disco-mini-master-nodes-changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("zen-disco-min-master-nodes-changed", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { // check if we have enough master nodes, if not, we need to move into joining the cluster again if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) { return rejoin(currentState, "not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + minimumMasterNodes + "]"); } - return currentState; + return unchanged(); } @@ -685,18 +678,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); - clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { if (!masterNode.equals(currentState.nodes().getMasterNode())) { // master got switched on us, no need to send anything - return currentState; + return unchanged(); } // flush any pending cluster states from old master, so it will not be set as master again @@ -710,29 +698,20 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - }); } void processNextPendingClusterState(String reason) { - clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) { - @Override - public boolean runOnlyOnMaster() { - return false; - } - + clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new LocalClusterUpdateTask(Priority.URGENT) { ClusterState newClusterState = null; @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { newClusterState = publishClusterState.pendingStatesQueue().getNextClusterStateToProcess(); // all pending states have been processed if (newClusterState == null) { - return currentState; + return unchanged(); } assert newClusterState.nodes().getMasterNode() != null : "received a cluster state without a master"; @@ -743,7 +722,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } if (shouldIgnoreOrRejectNewClusterState(logger, currentState, newClusterState)) { - return currentState; + return unchanged(); } // check to see that we monitor the correct master of the cluster @@ -754,7 +733,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { // its a fresh update from the master as we transition from a start of not having a master to having one logger.debug("got first state from fresh master [{}]", 
newClusterState.nodes().getMasterNodeId()); - return newClusterState; + return newState(newClusterState); } @@ -784,7 +763,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover builder.metaData(metaDataBuilder); } - return builder.build(); + return newState(builder.build()); } @Override @@ -962,7 +941,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return pingResponses; } - protected ClusterState rejoin(ClusterState clusterState, String reason) { + protected ClusterStateTaskExecutor.ClusterTasksResult rejoin(ClusterState clusterState, String reason) { // *** called from within an cluster state update task *** // assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME); @@ -971,29 +950,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover nodesFD.stop(); masterFD.stop(reason); - - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(clusterState.blocks()) - .addGlobalBlock(discoverySettings.getNoMasterBlock()) - .build(); - - // clean the nodes, we are now not connected to anybody, since we try and reform the cluster - DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).masterNodeId(null).build(); - // TODO: do we want to force a new thread if we actively removed the master? this is to give a full pinging cycle // before a decision is made. joinThreadControl.startNewThreadIfNotRunning(); - - return ClusterState.builder(clusterState) - .blocks(clusterBlocks) - .nodes(discoveryNodes) - .build(); + return LocalClusterUpdateTask.noMaster(); } private boolean localNodeMaster() { return nodes().isLocalNodeElectedMaster(); } - private ClusterState handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { + private ClusterStateTaskExecutor.ClusterTasksResult handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { assert localClusterState.nodes().isLocalNodeElectedMaster() : "handleAnotherMaster called but current node is not a master"; assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; @@ -1016,29 +983,27 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); } - return localClusterState; + return LocalClusterUpdateTask.unchanged(); } } private ZenPing.PingCollection pingAndWait(TimeValue timeout) { - final ZenPing.PingCollection response = new ZenPing.PingCollection(); - final CountDownLatch latch = new CountDownLatch(1); + final CompletableFuture response = new CompletableFuture<>(); try { - zenPing.ping(pings -> { - response.addPings(pings); - latch.countDown(); - }, timeout); + zenPing.ping(response::complete, timeout); } catch (Exception ex) { - logger.warn("Ping execution failed", ex); - latch.countDown(); + // logged later + response.completeExceptionally(ex); } try { - latch.await(); - return response; + return response.get(); } catch (InterruptedException e) { logger.trace("pingAndWait interrupted"); - return response; + return new ZenPing.PingCollection(); + } catch (ExecutionException e) { + logger.warn("Ping execution failed", e); + return new ZenPing.PingCollection(); } } @@ -1085,12 +1050,16 @@ public class ZenDiscovery 
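// Aside: an illustrative sketch (hypothetical names, not the ES API) of the
// callback-to-CompletableFuture bridge used by the rewritten pingAndWait above --
// complete the future from the consumer callback, completeExceptionally on submission
// errors, and fall back to an empty result on interruption or failure.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Consumer;

public class CallbackToFutureExample {
    // Hypothetical callback-style API, standing in for zenPing.ping(consumer, timeout).
    static void pingAsync(Consumer<List<String>> resultsConsumer) {
        new Thread(() -> {
            List<String> pings = new ArrayList<>();
            pings.add("node-1");
            resultsConsumer.accept(pings);
        }).start();
    }

    static List<String> pingAndWait() {
        final CompletableFuture<List<String>> response = new CompletableFuture<>();
        try {
            pingAsync(response::complete);
        } catch (Exception ex) {
            response.completeExceptionally(ex); // surfaced when unwrapped below
        }
        try {
            return response.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return new ArrayList<>(); // interrupted: return an empty collection
        } catch (ExecutionException e) {
            System.err.println("ping execution failed: " + e.getCause());
            return new ArrayList<>();
        }
    }

    public static void main(String[] args) {
        System.out.println(pingAndWait()); // [node-1]
    }
}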
extends AbstractLifecycleComponent implements Discover return; } logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); - clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("ping from another master", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - pingsWhileMaster.set(0); - return handleAnotherMaster(currentState, pingRequest.masterNode(), pingRequest.clusterStateVersion(), "node fd ping"); + public ClusterTasksResult execute(ClusterState currentState) throws Exception { + if (currentState.nodes().isLocalNodeElectedMaster()) { + pingsWhileMaster.set(0); + return handleAnotherMaster(currentState, pingRequest.masterNode(), pingRequest.clusterStateVersion(), "node fd ping"); + } else { + return unchanged(); + } } @Override @@ -1136,15 +1105,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover class RejoinClusterRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { - clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception e) { @@ -1172,7 +1136,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final AtomicBoolean running = new AtomicBoolean(false); private final AtomicReference currentJoinThread = new AtomicReference<>(); - public JoinThreadControl(ThreadPool threadPool) { + JoinThreadControl(ThreadPool threadPool) { this.threadPool = threadPool; } @@ -1188,7 +1152,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } /** cleans any running joining thread and calls {@link #rejoin} */ - public ClusterState stopRunningThreadAndRejoin(ClusterState clusterState, String reason) { + public ClusterStateTaskExecutor.ClusterTasksResult stopRunningThreadAndRejoin(ClusterState clusterState, String reason) { ClusterService.assertClusterStateThread(); currentJoinThread.set(null); return rejoin(clusterState, reason); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java index 75ea701dc99..622c4649db2 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java @@ -30,11 +30,11 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -42,17 +42,7 @@ public 
interface ZenPing extends Releasable { void start(PingContextProvider contextProvider); - void ping(PingListener listener, TimeValue timeout); - - interface PingListener { - - /** - * called when pinging is done. - * - * @param pings ping result *must - */ - void onPing(Collection pings); - } + void ping(Consumer resultsConsumer, TimeValue timeout); class PingResponse implements Streamable { @@ -191,13 +181,6 @@ public interface ZenPing extends Releasable { return false; } - /** adds multiple pings if newer than previous pings from the same node */ - public synchronized void addPings(Iterable pings) { - for (PingResponse ping : pings) { - addPing(ping); - } - } - /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */ public synchronized List toList() { return new ArrayList<>(pings.values()); diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 401f3f12f4b..e531408b57a 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -38,13 +38,13 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -372,7 +372,7 @@ public final class NodeEnvironment implements Closeable { private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger, NodePath... 
nodePaths) throws IOException { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); - NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths); + NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { metaData = new NodeMetaData(generateNodeId(settings)); } diff --git a/core/src/main/java/org/elasticsearch/env/NodeMetaData.java b/core/src/main/java/org/elasticsearch/env/NodeMetaData.java index 60625b1852d..38a4fce9cdc 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeMetaData.java +++ b/core/src/main/java/org/elasticsearch/env/NodeMetaData.java @@ -20,8 +20,6 @@ package org.elasticsearch.env; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -70,8 +68,7 @@ public final class NodeMetaData { return "node_id [" + nodeId + "]"; } - private static ObjectParser PARSER = new ObjectParser<>("node_meta_data", - Builder::new); + private static ObjectParser PARSER = new ObjectParser<>("node_meta_data", Builder::new); static { PARSER.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY)); @@ -110,7 +107,7 @@ public final class NodeMetaData { @Override public NodeMetaData fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, () -> ParseFieldMatcher.STRICT).build(); + return PARSER.apply(parser, null).build(); } }; } diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 37277586bf7..5a38a4b2b9e 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -343,7 +343,7 @@ public abstract class AsyncShardFetch implements Rel private boolean valueSet; private Throwable failure; - public NodeEntry(String nodeId) { + NodeEntry(String nodeId) { this.nodeId = nodeId; } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 605fe20a33a..69f7af6eef5 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; @@ -57,24 +58,6 @@ public class GatewayAllocator extends AbstractComponent { this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction); } - /** - * Returns true if the given shard has an async fetch pending - */ - public boolean hasFetchPending(ShardId shardId, boolean primary) { - if (primary) { - AsyncShardFetch fetch = asyncFetchStarted.get(shardId); - if (fetch != null) { - return fetch.getNumberOfInFlightFetches() > 0; - } - } else { - 
AsyncShardFetch fetch = asyncFetchStore.get(shardId); - if (fetch != null) { - return fetch.getNumberOfInFlightFetches() > 0; - } - } - return false; - } - public void setReallocation(final ClusterService clusterService, final RoutingService routingService) { this.routingService = routingService; clusterService.addStateApplier(event -> { @@ -137,9 +120,21 @@ public class GatewayAllocator extends AbstractComponent { replicaShardAllocator.allocateUnassigned(allocation); } + /** + * Computes and returns the decision for allocating a single unassigned shard. If called on an assigned shard, + * {@link AllocateUnassignedDecision#NOT_TAKEN} is returned. + */ + public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) { + if (unassignedShard.primary()) { + return primaryShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger); + } else { + return replicaShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger); + } + } + class InternalAsyncFetch extends AsyncShardFetch { - public InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { + InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { super(logger, type, shardId, action); } @@ -154,18 +149,15 @@ public class GatewayAllocator extends AbstractComponent { private final TransportNodesListGatewayStartedShards startedAction; - public InternalPrimaryShardAllocator(Settings settings, TransportNodesListGatewayStartedShards startedAction) { + InternalPrimaryShardAllocator(Settings settings, TransportNodesListGatewayStartedShards startedAction) { super(settings); this.startedAction = startedAction; } @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { - AsyncShardFetch fetch = asyncFetchStarted.get(shard.shardId()); - if (fetch == null) { - fetch = new InternalAsyncFetch<>(logger, "shard_started", shard.shardId(), startedAction); - asyncFetchStarted.put(shard.shardId(), fetch); - } + AsyncShardFetch fetch = + asyncFetchStarted.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction)); AsyncShardFetch.FetchResult shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); @@ -180,18 +172,15 @@ public class GatewayAllocator extends AbstractComponent { private final TransportNodesListShardStoreMetaData storeAction; - public InternalReplicaShardAllocator(Settings settings, TransportNodesListShardStoreMetaData storeAction) { + InternalReplicaShardAllocator(Settings settings, TransportNodesListShardStoreMetaData storeAction) { super(settings); this.storeAction = storeAction; } @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { - AsyncShardFetch fetch = asyncFetchStore.get(shard.shardId()); - if (fetch == null) { - fetch = new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction); - asyncFetchStore.put(shard.shardId(), fetch); - } + AsyncShardFetch fetch = + asyncFetchStore.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_store", shardId, storeAction)); AsyncShardFetch.FetchResult shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); if (shardStores.hasData()) { diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java 
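// Aside: the get/null-check/put sequences replaced above collapse into
// Map.computeIfAbsent, whose mapping function runs only on a miss. A tiny
// self-contained demonstration:
import java.util.HashMap;
import java.util.Map;

public class ComputeIfAbsentExample {
    public static void main(String[] args) {
        Map<String, StringBuilder> fetches = new HashMap<>();

        // Before: the three-step dance the patch removes.
        StringBuilder fetch = fetches.get("shard-0");
        if (fetch == null) {
            fetch = new StringBuilder("fetch for shard-0");
            fetches.put("shard-0", fetch);
        }

        // After: one expression; the existing entry is reused on a hit.
        StringBuilder same = fetches.computeIfAbsent("shard-0", id -> new StringBuilder("fetch for " + id));
        System.out.println(fetch == same); // true: second call reused the cached value
    }
}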
b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index b1891191500..5f75771e9e6 100644 --- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -223,7 +223,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode = new DiscoveryNode(in); indices = new IndexMetaData[in.readVInt()]; for (int i = 0; i < indices.length; i++) { - indices[i] = IndexMetaData.Builder.readFrom(in); + indices[i] = IndexMetaData.readFrom(in); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 71c3190e2ee..fb48405b725 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -35,6 +35,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -181,9 +182,9 @@ public abstract class MetaDataStateFormat { * Reads the state from a given file and compares the expected version against the actual version of * the state. */ - public final T read(Path file) throws IOException { + public final T read(NamedXContentRegistry namedXContentRegistry, Path file) throws IOException { try (Directory dir = newDirectory(file.getParent())) { - try (final IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) { + try (IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. CodecUtil.checksumEntireFile(indexInput); final int fileVersion = CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC, MIN_COMPATIBLE_STATE_FILE_VERSION, @@ -196,8 +197,8 @@ public abstract class MetaDataStateFormat { long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) { - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(new InputStreamIndexInput(slice, - contentSize))) { + try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(namedXContentRegistry, + new InputStreamIndexInput(slice, contentSize))) { return fromXContent(parser); } } @@ -260,7 +261,7 @@ public abstract class MetaDataStateFormat { * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ - public T loadLatestState(Logger logger, Path... dataLocations) throws IOException { + public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegistry, Path... 
dataLocations) throws IOException { List files = new ArrayList<>(); long maxStateId = -1; boolean maxStateIdIsLegacy = true; @@ -311,14 +312,14 @@ public abstract class MetaDataStateFormat { logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath()); continue; } - try (final XContentParser parser = XContentHelper.createParser(new BytesArray(data))) { + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, new BytesArray(data))) { state = fromXContent(parser); } if (state == null) { logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath()); } } else { - state = read(stateFile); + state = read(namedXContentRegistry, stateFile); logger.trace("state id [{}] read from [{}]", id, stateFile.getFileName()); } return state; diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 1673a653a6b..b900305ab55 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -40,10 +41,12 @@ import java.util.function.Predicate; public class MetaStateService extends AbstractComponent { private final NodeEnvironment nodeEnv; + private final NamedXContentRegistry namedXContentRegistry; - public MetaStateService(Settings settings, NodeEnvironment nodeEnv) { + public MetaStateService(Settings settings, NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) { super(settings); this.nodeEnv = nodeEnv; + this.namedXContentRegistry = namedXContentRegistry; } /** @@ -59,7 +62,8 @@ public class MetaStateService extends AbstractComponent { metaDataBuilder = MetaData.builder(); } for (String indexFolderName : nodeEnv.availableIndexFolders()) { - IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.resolveIndexFolder(indexFolderName)); + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, + nodeEnv.resolveIndexFolder(indexFolderName)); if (indexMetaData != null) { metaDataBuilder.put(indexMetaData, false); } else { @@ -74,7 +78,7 @@ public class MetaStateService extends AbstractComponent { */ @Nullable public IndexMetaData loadIndexState(Index index) throws IOException { - return IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index)); + return IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.indexPaths(index)); } /** @@ -86,7 +90,7 @@ public class MetaStateService extends AbstractComponent { if (excludeIndexPathIdsPredicate.test(indexFolderName)) { continue; } - IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.resolveIndexFolder(indexFolderName)); if (indexMetaData != null) { final String indexPathId = indexMetaData.getIndex().getUUID(); @@ -106,7 +110,7 @@ public class MetaStateService extends AbstractComponent { * Loads the global state, *without* index state, see {@link #loadFullState()} for that. 
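// Aside: the MetaDataStateFormat.read path above checksums the entire file before
// parsing anything, so corruption "barfs right here" rather than mid-parse. A minimal
// stand-alone analogue with plain JDK types (CRC32 standing in for Lucene's CodecUtil
// header/footer machinery; the file layout below is invented for illustration):
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.zip.CRC32;

public class ChecksumThenParseExample {
    // Toy state-file layout: payload bytes followed by an 8-byte big-endian CRC32 footer.
    static void write(Path file, byte[] payload) throws IOException {
        CRC32 crc = new CRC32();
        crc.update(payload);
        byte[] out = Arrays.copyOf(payload, payload.length + 8);
        long v = crc.getValue();
        for (int i = 0; i < 8; i++) {
            out[payload.length + i] = (byte) (v >>> (56 - 8 * i));
        }
        Files.write(file, out);
    }

    static String read(Path file) throws IOException {
        byte[] all = Files.readAllBytes(file);
        byte[] payload = Arrays.copyOf(all, all.length - 8);
        long stored = 0;
        for (int i = 0; i < 8; i++) {
            stored = (stored << 8) | (all[payload.length + i] & 0xFFL);
        }
        CRC32 crc = new CRC32();
        crc.update(payload);
        // Verify the checksum of the whole content *before* parsing anything.
        if (crc.getValue() != stored) {
            throw new IOException("checksum mismatch in " + file);
        }
        return new String(payload, StandardCharsets.UTF_8); // "parse" the payload
    }

    public static void main(String[] args) throws IOException {
        Path file = Files.createTempFile("state-", ".st");
        write(file, "{\"node_id\":\"abc\"}".getBytes(StandardCharsets.UTF_8));
        System.out.println(read(file));
    }
}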
*/ MetaData loadGlobalState() throws IOException { - return MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths()); + return MetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); } /** diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 2200ed1b4f2..8e85081a3c1 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -49,6 +49,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Function; @@ -183,7 +184,8 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { // this shard will be picked up when the node joins and we do another allocation reroute logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound); - return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, null); + return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, + explain ? buildNodeDecisions(null, shardState, inSyncAllocationIds) : null); } } @@ -228,7 +230,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { List nodeResults = null; if (explain) { - nodeResults = buildNodeDecisions(nodesToAllocate, inSyncAllocationIds); + nodeResults = buildNodeDecisions(nodesToAllocate, shardState, inSyncAllocationIds); } if (allocation.hasPendingAsyncFetch()) { return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA, nodeResults); @@ -244,13 +246,35 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Builds a map of nodes to the corresponding allocation decisions for those nodes. 
*/ - private static List buildNodeDecisions(NodesToAllocate nodesToAllocate, Set inSyncAllocationIds) { - return Stream.of(nodesToAllocate.yesNodeShards, nodesToAllocate.throttleNodeShards, nodesToAllocate.noNodeShards) - .flatMap(Collection::stream) - .map(dnode -> new NodeAllocationResult(dnode.nodeShardState.getNode(), - shardStoreInfo(dnode.nodeShardState, inSyncAllocationIds), - dnode.decision)) - .collect(Collectors.toList()); + private static List buildNodeDecisions(NodesToAllocate nodesToAllocate, + FetchResult fetchedShardData, + Set inSyncAllocationIds) { + List nodeResults = new ArrayList<>(); + Collection ineligibleShards; + if (nodesToAllocate != null) { + final Set discoNodes = new HashSet<>(); + nodeResults.addAll(Stream.of(nodesToAllocate.yesNodeShards, nodesToAllocate.throttleNodeShards, nodesToAllocate.noNodeShards) + .flatMap(Collection::stream) + .map(dnode -> { + discoNodes.add(dnode.nodeShardState.getNode()); + return new NodeAllocationResult(dnode.nodeShardState.getNode(), + shardStoreInfo(dnode.nodeShardState, inSyncAllocationIds), + dnode.decision); + }).collect(Collectors.toList())); + ineligibleShards = fetchedShardData.getData().values().stream().filter(shardData -> + discoNodes.contains(shardData.getNode()) == false + ).collect(Collectors.toList()); + } else { + // there were no shard copies eligible to be assigned the allocation, + // so all fetched shard data represent ineligible shard copies + ineligibleShards = fetchedShardData.getData().values(); + } + + nodeResults.addAll(ineligibleShards.stream().map(shardData -> + new NodeAllocationResult(shardData.getNode(), shardStoreInfo(shardData, inSyncAllocationIds), null) + ).collect(Collectors.toList())); + + return nodeResults; } private static ShardStoreInfo shardStoreInfo(NodeGatewayStartedShards nodeShardState, Set inSyncAllocationIds) { @@ -389,7 +413,8 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { yesNodeShards.add(decidedNode); } } - return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards)); + return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), + Collections.unmodifiableList(noNodeShards)); } /** @@ -476,17 +501,21 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { * recovered on any node */ private boolean recoverOnAnyNode(IndexMetaData metaData) { - return (IndexMetaData.isOnSharedFilesystem(metaData.getSettings()) || IndexMetaData.isOnSharedFilesystem(this.settings)) - && IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings); + // don't use the setting directly, so as not to trigger verbose deprecation logging + return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings)) + && (metaData.getSettings().getAsBooleanLenientForPreEs6Indices( + metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) || + this.settings.getAsBooleanLenientForPreEs6Indices + (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger)); } protected abstract FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); - static class NodeShardsResult { - public final List orderedAllocationCandidates; - public final int allocationsFound; + private static class 
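// Aside: a loose, runnable model (hypothetical types) of the eligible/ineligible split
// in the new buildNodeDecisions above -- decided shard copies keep their decision,
// while every other fetched copy is still reported, just with no decision, so the
// explain output covers everything the async fetch returned.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class NodeDecisionsExample {
    public static void main(String[] args) {
        // Nodes whose shard copies were actually decided on, vs. everything fetched.
        Set<String> decidedNodes = new HashSet<>(Arrays.asList("node-a", "node-b"));
        List<String> fetchedNodes = Arrays.asList("node-a", "node-b", "node-c");

        List<String> results = new ArrayList<>();
        // Eligible copies carry a real decision...
        results.addAll(decidedNodes.stream()
                .map(n -> n + " -> decided")
                .collect(Collectors.toList()));
        // ...while fetched copies that were never candidates are reported with none.
        results.addAll(fetchedNodes.stream()
                .filter(n -> decidedNodes.contains(n) == false)
                .map(n -> n + " -> ineligible (no decision)")
                .collect(Collectors.toList()));

        results.forEach(System.out::println);
    }
}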
NodeShardsResult { + final List orderedAllocationCandidates; + final int allocationsFound; - public NodeShardsResult(List orderedAllocationCandidates, int allocationsFound) { + NodeShardsResult(List orderedAllocationCandidates, int allocationsFound) { this.orderedAllocationCandidates = orderedAllocationCandidates; this.allocationsFound = allocationsFound; } @@ -497,7 +526,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { final List throttleNodeShards; final List noNodeShards; - public NodesToAllocate(List yesNodeShards, List throttleNodeShards, List noNodeShards) { + NodesToAllocate(List yesNodeShards, List throttleNodeShards, List noNodeShards) { this.yesNodeShards = yesNodeShards; this.throttleNodeShards = throttleNodeShards; this.noNodeShards = noNodeShards; diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 3a838ef3785..b91637e072f 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -395,7 +395,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { @Nullable private final Map nodeDecisions; - public MatchingNodes(ObjectLongMap nodesToSize, @Nullable Map nodeDecisions) { + MatchingNodes(ObjectLongMap nodesToSize, @Nullable Map nodeDecisions) { this.nodesToSize = nodesToSize; this.nodeDecisions = nodeDecisions; diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 3e6769200e9..13c317c53e9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -185,7 +185,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction"); - } else { - inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(contentLength); - } - // iff we could reserve bytes for the request we need to send the response also over this channel - responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength); - restController.dispatchRequest(request, responseChannel, client, threadContext); - } catch (Exception e) { - try { - responseChannel.sendResponse(new BytesRestResponse(channel, e)); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.error((Supplier) () -> - new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner); - } - } - } - - void handleFavicon(RestRequest request, RestChannel channel) { - if (request.method() == RestRequest.Method.GET) { - try { - try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - Streams.copy(stream, out); - BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", out.toByteArray()); - channel.sendResponse(restResponse); - } - } catch (IOException e) { - channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); - } - } else { - channel.sendResponse(new BytesRestResponse(FORBIDDEN, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); - } - } - - private static final class ResourceHandlingHttpChannel implements RestChannel { - private final 
RestChannel delegate; - private final CircuitBreakerService circuitBreakerService; - private final int contentLength; - private final AtomicBoolean closed = new AtomicBoolean(); - - public ResourceHandlingHttpChannel(RestChannel delegate, CircuitBreakerService circuitBreakerService, int contentLength) { - this.delegate = delegate; - this.circuitBreakerService = circuitBreakerService; - this.contentLength = contentLength; - } - - @Override - public XContentBuilder newBuilder() throws IOException { - return delegate.newBuilder(); - } - - @Override - public XContentBuilder newErrorBuilder() throws IOException { - return delegate.newErrorBuilder(); - } - - @Override - public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException { - return delegate.newBuilder(autoDetectSource, useFiltering); - } - - @Override - public BytesStreamOutput bytesOutput() { - return delegate.bytesOutput(); - } - - @Override - public RestRequest request() { - return delegate.request(); - } - - @Override - public boolean detailedErrorsEnabled() { - return delegate.detailedErrorsEnabled(); - } - - @Override - public void sendResponse(RestResponse response) { - close(); - delegate.sendResponse(response); - } - - private void close() { - // attempt to close once atomically - if (closed.compareAndSet(false, true) == false) { - throw new IllegalStateException("Channel is already closed"); - } - inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(-contentLength); - } - - } - - private static CircuitBreaker inFlightRequestsBreaker(CircuitBreakerService circuitBreakerService) { - // We always obtain a fresh breaker to reflect changes to the breaker configuration. - return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - } -} diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java index 4dc4a888d8a..134557a28ad 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/HttpServerTransport.java @@ -21,11 +21,13 @@ package org.elasticsearch.http; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; public interface HttpServerTransport extends LifecycleComponent { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; - String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss"; BoundTransportAddress boundAddress(); @@ -33,6 +35,15 @@ public interface HttpServerTransport extends LifecycleComponent { HttpStats stats(); - void httpServerAdapter(HttpServerAdapter httpServerAdapter); - + @FunctionalInterface + interface Dispatcher { + /** + * Dispatches the {@link RestRequest} to the relevant request handler or responds to the given rest channel directly if + * the request can't be handled by any request handler. 
diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
index 60bc3449d0b..b5e254aa4c2 100644
--- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
+++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
@@ -68,6 +68,8 @@ public final class HttpTransportSettings {
         Setting.intSetting("http.publish_port", -1, -1, Property.NodeScope);
     public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED =
         Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope);
+    public static final Setting<Boolean> SETTING_HTTP_CONTENT_TYPE_REQUIRED =
+        Setting.boolSetting("http.content_type.required", false, Property.NodeScope);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
         Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =
diff --git a/core/src/main/java/org/elasticsearch/index/Index.java b/core/src/main/java/org/elasticsearch/index/Index.java
index 25b293ad387..da94ad2ec72 100644
--- a/core/src/main/java/org/elasticsearch/index/Index.java
+++ b/core/src/main/java/org/elasticsearch/index/Index.java
@@ -21,8 +21,6 @@ package org.elasticsearch.index;
 
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -42,7 +40,7 @@ public class Index implements Writeable, ToXContent {
     public static final Index[] EMPTY_ARRAY = new Index[0];
     private static final String INDEX_UUID_KEY = "index_uuid";
     private static final String INDEX_NAME_KEY = "index_name";
-    private static final ObjectParser<Builder, ParseFieldMatcherSupplier> INDEX_PARSER = new ObjectParser<>("index", Builder::new);
+    private static final ObjectParser<Builder, Void> INDEX_PARSER = new ObjectParser<>("index", Builder::new);
     static {
         INDEX_PARSER.declareString(Builder::name, new ParseField(INDEX_NAME_KEY));
         INDEX_PARSER.declareString(Builder::uuid, new ParseField(INDEX_UUID_KEY));
@@ -118,11 +116,7 @@ public class Index implements Writeable, ToXContent {
     }
 
     public static Index fromXContent(final XContentParser parser) throws IOException {
-        return INDEX_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build();
-    }
-
-    public static final Index parseIndex(final XContentParser parser, final ParseFieldMatcherSupplier supplier) {
-        return INDEX_PARSER.apply(parser, supplier).build();
+        return INDEX_PARSER.parse(parser, null).build();
     }
 
     /**
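The Index change shows the tail end of removing ParseFieldMatcher: an ObjectParser that used to need a ParseFieldMatcherSupplier context now needs no context at all, so its second type parameter becomes Void and callers simply pass null. A minimal sketch of the same pattern with an invented Point class (only ObjectParser, ParseField, and XContentParser are the real Elasticsearch types):

---------------------------------------------------------------------------
import java.io.IOException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

// With a Void context there is nothing for field parsers to consult, so the
// behavior matches the strict matching the old () -> ParseFieldMatcher.STRICT
// supplier was already requesting.
class Point {
    String name;

    static final ObjectParser<Point, Void> PARSER = new ObjectParser<>("point", Point::new);
    static {
        PARSER.declareString((point, value) -> point.name = value, new ParseField("name"));
    }

    static Point fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);  // null context replaces the matcher supplier
    }
}
---------------------------------------------------------------------------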
diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java
index 8389335d889..dc7021e81fc 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexModule.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java
@@ -23,10 +23,12 @@ import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.TriFunction;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.cache.query.DisabledQueryCache;
@@ -47,7 +49,6 @@ import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.mapper.MapperRegistry;
-import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -69,7 +70,7 @@ import java.util.function.Function;
  * IndexModule represents the central extension point for index level custom implementations like:
  *