diff --git a/Vagrantfile b/Vagrantfile index 8f0fb08ad6d..720e454a069 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -42,7 +42,7 @@ Vagrant.configure(2) do |config| # debian and it works fine. config.vm.define "debian-8" do |config| config.vm.box = "elastic/debian-8-x86_64" - deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' + deb_common config end config.vm.define "centos-6" do |config| config.vm.box = "elastic/centos-6-x86_64" @@ -114,10 +114,10 @@ SOURCE_PROMPT end def ubuntu_common(config, extra: '') - deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*', extra: extra + deb_common config, extra: extra end -def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '') +def deb_common(config, extra: '') # http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html config.vm.provision "fix-no-tty", type: "shell" do |s| s.privileged = false @@ -127,24 +127,14 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '') update_command: "apt-get update", update_tracking_file: "/var/cache/apt/archives/last_update", install_command: "apt-get install -y", - java_package: "openjdk-8-jdk", - extra: <<-SHELL - export DEBIAN_FRONTEND=noninteractive - ls /etc/apt/sources.list.d/#{openjdk_list}.list > /dev/null 2>&1 || - (echo "==> Importing java-8 ppa" && - #{add_openjdk_repository_command} && - apt-get update) - #{extra} -SHELL - ) + extra: extra) end def rpm_common(config) provision(config, update_command: "yum check-update", update_tracking_file: "/var/cache/yum/last_update", - install_command: "yum install -y", - java_package: "java-1.8.0-openjdk-devel") + install_command: "yum install -y") end def dnf_common(config) @@ -152,8 +142,7 @@ def dnf_common(config) update_command: "dnf check-update", update_tracking_file: "/var/cache/dnf/last_update", install_command: "dnf install -y", - install_command_retries: 5, - java_package: "java-1.8.0-openjdk-devel") + install_command_retries: 5) if Vagrant.has_plugin?("vagrant-cachier") # Autodetect doesn't work.... config.cache.auto_detect = false @@ -170,7 +159,6 @@ def suse_common(config, extra) update_command: "zypper --non-interactive list-updates", update_tracking_file: "/var/cache/zypp/packages/last_update", install_command: "zypper --non-interactive --quiet install --no-recommends", - java_package: "java-1_8_0-openjdk-devel", extra: extra) end @@ -193,7 +181,6 @@ end # is cached by vagrant-cachier. # @param install_command [String] The command used to install a package. # Required. Think `apt-get install #{package}`. -# @param java_package [String] The name of the java package. Required. # @param extra [String] Extra provisioning commands run before anything else. # Optional. Used for things like setting up the ppa for Java 8. def provision(config, @@ -201,13 +188,11 @@ def provision(config, update_tracking_file: 'required', install_command: 'required', install_command_retries: 0, - java_package: 'required', extra: '') # Vagrant run ruby 2.0.0 which doesn't have required named parameters.... 
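  # As an illustration, a minimal sketch of the sentinel-default idiom used by
  # the raise statements below (the `example` method is hypothetical; on
  # Ruby 2.1+ one would simply write `def example(cmd:)`):
  #
  #   def example(cmd: 'required')
  #     raise ArgumentError.new('cmd is required') if cmd == 'required'
  #     system(cmd)
  #   end
  #
  #   example()                        # raises ArgumentError: cmd is required
  #   example(cmd: 'apt-get update')   # runs the command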
raise ArgumentError.new('update_command is required') if update_command == 'required' raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required' raise ArgumentError.new('install_command is required') if install_command == 'required' - raise ArgumentError.new('java_package is required') if java_package == 'required' config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL set -e set -o pipefail @@ -254,7 +239,10 @@ def provision(config, #{extra} - installed java || install #{java_package} + installed java || { + echo "==> Java is not installed on vagrant box #{config.vm.box}" + return 1 + } ensure tar ensure curl ensure unzip diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index fe6d7b59eb3..5a508fa1065 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -37,10 +37,7 @@ apply plugin: 'application' archivesBaseName = 'elasticsearch-benchmarks' mainClassName = 'org.openjdk.jmh.Main' -// never try to invoke tests on the benchmark project - there aren't any -check.dependsOn.remove(test) -// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip -task test(type: Test, overwrite: true) +test.enabled = false dependencies { compile("org.elasticsearch:elasticsearch:${version}") { @@ -59,7 +56,6 @@ compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-u // enable the JMH's BenchmarkProcessor to generate the final benchmark classes // needs to be added separately otherwise Gradle will quote it and javac will fail compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) -compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" forbiddenApis { // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index b047bc89da2..dfa58592a18 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -120,12 +120,15 @@ public class PluginBuildPlugin extends BuildPlugin { // add the plugin properties and metadata to test resources, so unit tests can // know about the plugin (used by test security code to statically initialize the plugin in unit tests) SourceSet testSourceSet = project.sourceSets.test - testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties') + testSourceSet.output.dir(buildProperties.descriptorOutput.parentFile, builtBy: 'pluginProperties') testSourceSet.resources.srcDir(pluginMetadata) // create the actual bundle task, which zips up all the files for the plugin Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) { - from buildProperties // plugin properties file + from(buildProperties.descriptorOutput.parentFile) { + // plugin properties file + include(buildProperties.descriptorOutput.name) + } from pluginMetadata // metadata (eg custom security policy) from project.jar // this plugin's jar from project.configurations.runtime - project.configurations.provided // the dep jars @@ -250,19 +253,15 @@ public class PluginBuildPlugin extends BuildPlugin { protected void addNoticeGeneration(Project project) {
File licenseFile = project.pluginProperties.extension.licenseFile if (licenseFile != null) { - project.bundlePlugin.into('/') { - from(licenseFile.parentFile) { - include(licenseFile.name) - } + project.bundlePlugin.from(licenseFile.parentFile) { + include(licenseFile.name) } } File noticeFile = project.pluginProperties.extension.noticeFile if (noticeFile != null) { NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class) generateNotice.dependencies(project) - project.bundlePlugin.into('/') { - from(generateNotice) - } + project.bundlePlugin.from(generateNotice) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 7156c2650cb..94bc0ba3e75 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -22,6 +22,7 @@ import org.elasticsearch.gradle.VersionProperties import org.gradle.api.InvalidUserDataException import org.gradle.api.Task import org.gradle.api.tasks.Copy +import org.gradle.api.tasks.OutputFile /** * Creates a plugin descriptor. */ @@ -29,20 +30,22 @@ import org.gradle.api.tasks.Copy class PluginPropertiesTask extends Copy { PluginPropertiesExtension extension - File generatedResourcesDir = new File(project.buildDir, 'generated-resources') + + @OutputFile + File descriptorOutput = new File(project.buildDir, 'generated-resources/plugin-descriptor.properties') PluginPropertiesTask() { - File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties') + File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}") Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') { doLast { - InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream('/plugin-descriptor.properties') + InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}") templateFile.parentFile.mkdirs() templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8') } } + dependsOn(copyPluginPropertiesTemplate) extension = project.extensions.create('esplugin', PluginPropertiesExtension, project) - project.clean.delete(generatedResourcesDir) project.afterEvaluate { // check required properties are set if (extension.name == null) { @@ -55,8 +58,8 @@ class PluginPropertiesTask extends Copy { throw new InvalidUserDataException('classname is a required setting for esplugin') } // configure property substitution - from(templateFile) - into(generatedResourcesDir) + from(templateFile.parentFile).include(descriptorOutput.name) + into(descriptorOutput.parentFile) Map<String, String> properties = generateSubstitutions() expand(properties) inputs.properties(properties) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 96b7ac42527..f7b30e774e3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -91,6 +91,7 @@ class PrecommitTasks { if (testForbidden != null) { testForbidden.configure { signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt') } } Task
forbiddenApis = project.tasks.findByName('forbiddenApis') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 3fc622ef5aa..c3dff77dfd4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -51,22 +51,18 @@ class ClusterFormationTasks { * * Returns a list of NodeInfo objects for each node in the cluster. */ - static List setup(Project project, Task task, ClusterConfiguration config) { - if (task.getEnabled() == false) { - // no need to add cluster formation tasks if the task won't run! - return - } + static List setup(Project project, String prefix, Task runner, ClusterConfiguration config) { File sharedDir = new File(project.buildDir, "cluster/shared") // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk // such that snapshots survive failures / test runs and there is no simple way today to fix that. - Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) { + Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) { delete sharedDir doLast { sharedDir.mkdirs() } } - List startTasks = [cleanup] + List startTasks = [] List nodes = [] if (config.numNodes < config.numBwcNodes) { throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]") @@ -75,7 +71,7 @@ class ClusterFormationTasks { throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0") } // this is our current version distribution configuration we use for all kinds of REST tests etc. - String distroConfigName = "${task.name}_elasticsearchDistro" + String distroConfigName = "${prefix}_elasticsearchDistro" Configuration currentDistro = project.configurations.create(distroConfigName) configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) if (config.bwcVersion != null && config.numBwcNodes > 0) { @@ -89,7 +85,7 @@ class ClusterFormationTasks { } configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion) for (Map.Entry entry : config.plugins.entrySet()) { - configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(), + configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(), project.configurations.elasticsearchBwcPlugins, config.bwcVersion) } project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) @@ -104,13 +100,14 @@ class ClusterFormationTasks { elasticsearchVersion = config.bwcVersion distro = project.configurations.elasticsearchBwcDistro } - NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir) + NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) nodes.add(node) - startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0))) + Task dependsOn = startTasks.empty ? 
cleanup : startTasks.get(0) + startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0))) } - Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks) - task.dependsOn(wait) + Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks) + runner.dependsOn(wait) return nodes } @@ -150,58 +147,58 @@ class ClusterFormationTasks { * * @return a task which starts the node. */ - static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) { + static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) { // tasks are chained so their execution order is maintained - Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) { + Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { delete node.homeDir delete node.cwd doLast { node.cwd.mkdirs() } } - setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node) - setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration) - setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode) + setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) + setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) + setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration) + setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode) if (node.config.plugins.isEmpty() == false) { if (node.nodeVersion == VersionProperties.elasticsearch) { - setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node) + setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node) } else { - setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node) + setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node) } } // install modules for (Project module : node.config.modules) { String actionName = pluginTaskName('install', module.name, 'Module') - setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module) + setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module) } // install plugins for (Map.Entry plugin : node.config.plugins.entrySet()) { String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin') - setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue()) + setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue()) } // sets up any extra config files that need to be copied over to the ES instance; // its run after plugins have been installed, as the extra config files may belong to plugins - setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node) + setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) // extra setup commands for (Map.Entry command : 
node.config.setupCommands.entrySet()) { // the first argument is the actual script name, relative to home Object[] args = command.getValue().clone() args[0] = new File(node.homeDir, args[0].toString()) - setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args) + setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args) } - Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node) + Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node) if (node.config.daemonize) { - Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node) + Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node) // if we are running in the background, make sure to stop the server when the task completes - task.finalizedBy(stop) + runner.finalizedBy(stop) start.finalizedBy(stop) } return start @@ -648,11 +645,11 @@ class ClusterFormationTasks { } /** Returns a unique task name for this task and node configuration */ - static String taskName(Task parentTask, NodeInfo node, String action) { + static String taskName(String prefix, NodeInfo node, String action) { if (node.config.numNodes > 1) { - return "${parentTask.name}#node${node.nodeNum}.${action}" + return "${prefix}#node${node.nodeNum}.${action}" } else { - return "${parentTask.name}#${action}" + return "${prefix}#${action}" } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 73f32961fb3..59c65c684ac 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.taskdefs.condition.Os import org.gradle.api.InvalidUserDataException import org.gradle.api.Project -import org.gradle.api.Task /** * A container for the files and configuration associated with a single node in a test cluster. @@ -96,17 +95,17 @@ class NodeInfo { /** the version of elasticsearch that this node runs */ String nodeVersion - /** Creates a node to run as part of a cluster for the given task */ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) { + /** Holds node configuration for part of a test cluster. 
*/ + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum this.sharedDir = sharedDir if (config.clusterName != null) { clusterName = config.clusterName } else { - clusterName = "${task.path.replace(':', '_').substring(1)}" + clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix } - baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}") + baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') this.nodeVersion = nodeVersion homeDir = homeDir(baseDir, config.distribution, nodeVersion) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 51bccb4fe75..075e8129e6f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test import com.carrotsearch.gradle.junit4.RandomizedTestingTask import org.elasticsearch.gradle.BuildPlugin +import org.gradle.api.DefaultTask import org.gradle.api.Task import org.gradle.api.internal.tasks.options.Option import org.gradle.api.plugins.JavaBasePlugin @@ -27,12 +28,15 @@ import org.gradle.api.tasks.Input import org.gradle.util.ConfigureUtil /** - * Runs integration tests, but first starts an ES cluster, - * and passes the ES cluster info as parameters to the tests. + * A wrapper task around setting up a cluster and running rest tests. */ -public class RestIntegTestTask extends RandomizedTestingTask { +public class RestIntegTestTask extends DefaultTask { - ClusterConfiguration clusterConfig + protected ClusterConfiguration clusterConfig + + protected RandomizedTestingTask runner + + protected Task clusterInit /** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */ List nodes @@ -44,35 +48,44 @@ public class RestIntegTestTask extends RandomizedTestingTask { public RestIntegTestTask() { description = 'Runs rest tests against an elasticsearch cluster.' 
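        // The constructor below applies a wrapper-task pattern: RestIntegTestTask is
        // now a plain lifecycle task, and the actual test execution is delegated to a
        // generated "${name}Runner" RandomizedTestingTask (plus a "${name}Cluster#init"
        // placeholder task). A rough sketch of the shape, with hypothetical names and
        // a generic Test type standing in for RandomizedTestingTask:
        //
        //   class WrapperTask extends DefaultTask {
        //       Task runner
        //       WrapperTask() {
        //           runner = project.tasks.create("${name}Runner", Test)
        //           super.dependsOn(runner)   // running the wrapper runs the runner
        //       }
        //   }
        //
        // Keeping the wrapper a DefaultTask means `gradle integTest` still works,
        // while cluster formation can be wired against the runner instead.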
group = JavaBasePlugin.VERIFICATION_GROUP - dependsOn(project.testClasses) - classpath = project.sourceSets.test.runtimeClasspath - testClassesDir = project.sourceSets.test.output.classesDir - clusterConfig = new ClusterConfiguration(project) + runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class) + super.dependsOn(runner) + clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses) + runner.dependsOn(clusterInit) + runner.classpath = project.sourceSets.test.runtimeClasspath + runner.testClassesDir = project.sourceSets.test.output.classesDir + clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) // start with the common test configuration - configure(BuildPlugin.commonTestConfig(project)) + runner.configure(BuildPlugin.commonTestConfig(project)) // override/add more for rest tests - parallelism = '1' - include('**/*IT.class') - systemProperty('tests.rest.load_packaged', 'false') + runner.parallelism = '1' + runner.include('**/*IT.class') + runner.systemProperty('tests.rest.load_packaged', 'false') // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") - systemProperty('tests.config.dir', "${-> nodes[0].confDir}") + runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") + runner.systemProperty('tests.config.dir', "${-> nodes[0].confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops - systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") // copy the rest spec/tests into the test resources RestSpecHack.configureDependencies(project) project.afterEvaluate { - dependsOn(RestSpecHack.configureTask(project, includePackaged)) + runner.dependsOn(RestSpecHack.configureTask(project, includePackaged)) } // this must run after all projects have been configured, so we know any project // references can be accessed as a fully configured project.gradle.projectsEvaluated { - nodes = ClusterFormationTasks.setup(project, this, clusterConfig) + if (enabled == false) { + runner.enabled = false + clusterInit.enabled = false + return // no need to add cluster formation tasks if the task won't run! + } + nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig) + super.dependsOn(runner.finalizedBy) } } @@ -84,25 +97,16 @@ public class RestIntegTestTask extends RandomizedTestingTask { clusterConfig.debug = enabled; } - @Input - public void cluster(Closure closure) { - ConfigureUtil.configure(closure, clusterConfig) - } - - public ClusterConfiguration getCluster() { - return clusterConfig - } - public List getNodes() { return nodes } @Override public Task dependsOn(Object... 
dependencies) { - super.dependsOn(dependencies) + runner.dependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).stopTask) } } return this @@ -110,11 +114,16 @@ public class RestIntegTestTask extends RandomizedTestingTask { @Override public void setDependsOn(Iterable dependencies) { - super.setDependsOn(dependencies) + runner.setDependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).stopTask) } } } + + @Override + public Task mustRunAfter(Object... tasks) { + clusterInit.mustRunAfter(tasks) + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index 47a559efccb..f00be89f6ae 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -43,7 +43,7 @@ public class RestTestPlugin implements Plugin { } RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) - integTest.cluster.distribution = 'zip' // rest tests should run with the real zip + integTest.clusterConfig.distribution = 'zip' // rest tests should run with the real zip integTest.mustRunAfter(project.precommit) project.check.dependsOn(integTest) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index a71dc59dbf9..a88152d7865 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -18,7 +18,7 @@ public class RunTask extends DefaultTask { clusterConfig.daemonize = false clusterConfig.distribution = 'zip' project.afterEvaluate { - ClusterFormationTasks.setup(project, this, clusterConfig) + ClusterFormationTasks.setup(project, name, this, clusterConfig) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 0acb79e2bae..05632b9b26b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,14 +1,14 @@ package org.elasticsearch.gradle.vagrant import org.elasticsearch.gradle.FileContentsTask -import org.gradle.BuildAdapter -import org.gradle.BuildResult import org.gradle.api.* import org.gradle.api.artifacts.dsl.RepositoryHandler +import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec +import org.gradle.api.tasks.TaskState class VagrantTestPlugin implements Plugin { @@ -16,10 +16,8 @@ class VagrantTestPlugin implements Plugin { static List BOXES = [ 'centos-6', 'centos-7', - // TODO: re-enable debian once it does not have broken openjdk packages - //'debian-8', - // TODO: re-enable fedora once it does not have broken openjdk packages - //'fedora-24', + 'debian-8', + 'fedora-24', 'oel-6', 'oel-7', 'opensuse-13', @@ -125,33 +123,27 @@ class VagrantTestPlugin implements 
Plugin { private static void createBatsConfiguration(Project project) { project.configurations.create(BATS) - Long seed - String formattedSeed = null - String[] upgradeFromVersions - - String maybeTestsSeed = System.getProperty("tests.seed", null); + final long seed + final String formattedSeed + String maybeTestsSeed = System.getProperty("tests.seed") if (maybeTestsSeed != null) { - List seeds = maybeTestsSeed.tokenize(':') - if (seeds.size() != 0) { - String masterSeed = seeds.get(0) - seed = new BigInteger(masterSeed, 16).longValue() - formattedSeed = maybeTestsSeed + if (maybeTestsSeed.trim().isEmpty()) { + throw new GradleException("explicit tests.seed cannot be empty") } - } - if (formattedSeed == null) { + String masterSeed = maybeTestsSeed.tokenize(':').get(0) + seed = new BigInteger(masterSeed, 16).longValue() + formattedSeed = maybeTestsSeed + } else { seed = new Random().nextLong() formattedSeed = String.format("%016X", seed) } - String maybeUpdradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null) - if (maybeUpdradeFromVersions != null) { - upgradeFromVersions = maybeUpdradeFromVersions.split(",") - } else { - upgradeFromVersions = getVersionsFile(project) + String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion"); + if (upgradeFromVersion == null) { + List availableVersions = getVersionsFile(project).readLines('UTF-8') + upgradeFromVersion = availableVersions[new Random(seed).nextInt(availableVersions.size())] } - String upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)] - DISTRIBUTION_ARCHIVES.each { // Adds a dependency for the current version project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives')) @@ -165,7 +157,6 @@ class VagrantTestPlugin implements Plugin { project.extensions.esvagrant.testSeed = seed project.extensions.esvagrant.formattedTestSeed = formattedSeed project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion - project.extensions.esvagrant.upgradeFromVersions = upgradeFromVersions } private static void createCleanTask(Project project) { @@ -256,22 +247,9 @@ class VagrantTestPlugin implements Plugin { contents project.extensions.esvagrant.upgradeFromVersion } - Task vagrantSetUpTask = project.tasks.create('vagrantSetUp') + Task vagrantSetUpTask = project.tasks.create('setupBats') vagrantSetUpTask.dependsOn 'vagrantCheckVersion' vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile - vagrantSetUpTask.doFirst { - project.gradle.addBuildListener new BuildAdapter() { - @Override - void buildFinished(BuildResult result) { - if (result.failure) { - println "Reproduce with: gradle packagingTest " - +"-Pvagrant.boxes=${project.extensions.esvagrant.boxes} " - + "-Dtests.seed=${project.extensions.esvagrant.formattedSeed} " - + "-Dtests.packaging.upgrade.from.versions=${project.extensions.esvagrant.upgradeFromVersions.join(",")}" - } - } - } - } } private static void createUpdateVersionsTask(Project project) { @@ -280,7 +258,7 @@ class VagrantTestPlugin implements Plugin { group 'Verification' doLast { File versions = getVersionsFile(project) - versions.text = listVersions(project).join('\n') + '\n' + versions.setText(listVersions(project).join('\n') + '\n', 'UTF-8') } } } @@ -290,14 +268,11 @@ class VagrantTestPlugin implements Plugin { description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging 
tests.' group 'Verification' doLast { - String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null) - if (maybeUpdateFromVersions == null) { - Set versions = listVersions(project) - Set actualVersions = new TreeSet<>(project.extensions.esvagrant.upgradeFromVersions) - if (!versions.equals(actualVersions)) { - throw new GradleException("out-of-date versions " + actualVersions + - ", expected " + versions + "; run gradle vagrantUpdateVersions") - } + Set versions = listVersions(project) + Set actualVersions = new TreeSet<>(getVersionsFile(project).readLines('UTF-8')) + if (!versions.equals(actualVersions)) { + throw new GradleException("out-of-date versions " + actualVersions + + ", expected " + versions + "; run gradle vagrantUpdateVersions") } } } @@ -379,8 +354,8 @@ class VagrantTestPlugin implements Plugin { assert project.tasks.virtualboxCheckVersion != null Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion - assert project.tasks.vagrantSetUp != null - Task vagrantSetUp = project.tasks.vagrantSetUp + assert project.tasks.setupBats != null + Task setupBats = project.tasks.setupBats assert project.tasks.packagingTest != null Task packagingTest = project.tasks.packagingTest @@ -411,8 +386,9 @@ class VagrantTestPlugin implements Plugin { boxName box environmentVars vagrantEnvVars args 'box', 'update', box - dependsOn vagrantCheckVersion, virtualboxCheckVersion, vagrantSetUp + dependsOn vagrantCheckVersion, virtualboxCheckVersion } + update.mustRunAfter(setupBats) Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) { boxName box @@ -433,11 +409,6 @@ class VagrantTestPlugin implements Plugin { dependsOn update } - if (project.extensions.esvagrant.boxes.contains(box) == false) { - // we d'ont need tests tasks if this box was not specified - continue; - } - Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) { environment vagrantEnvVars dependsOn up @@ -447,14 +418,32 @@ class VagrantTestPlugin implements Plugin { } vagrantSmokeTest.dependsOn(smoke) - Task packaging = project.tasks.create("vagrant${boxTask}#packagingtest", BatsOverVagrantTask) { + Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) { boxName box environmentVars vagrantEnvVars - dependsOn up + dependsOn up, setupBats finalizedBy halt command BATS_TEST_COMMAND } - packagingTest.dependsOn(packaging) + TaskExecutionAdapter reproduceListener = new TaskExecutionAdapter() { + @Override + void afterExecute(Task task, TaskState state) { + if (state.failure != null) { + println "REPRODUCE WITH: gradle ${packaging.path} " + + "-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} " + } + } + } + packaging.doFirst { + project.gradle.addListener(reproduceListener) + } + packaging.doLast { + project.gradle.removeListener(reproduceListener) + } + + if (project.extensions.esvagrant.boxes.contains(box)) { + packagingTest.dependsOn(packaging) + } } } } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b5f6edb1327..309fd865a22 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -157,7 +157,6 @@ - @@ -452,8 +451,6 @@ - - diff --git a/buildSrc/src/main/resources/forbidden/http-signatures.txt b/buildSrc/src/main/resources/forbidden/http-signatures.txt new file mode 100644 index 00000000000..dcf20bbb093 --- /dev/null +++ 
b/buildSrc/src/main/resources/forbidden/http-signatures.txt @@ -0,0 +1,45 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +@defaultMessage Explicitly specify the ContentType of HTTP entities when creating +org.apache.http.entity.StringEntity#<init>(java.lang.String) +org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String) +org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset) +org.apache.http.entity.ByteArrayEntity#<init>(byte[]) +org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int) +org.apache.http.entity.FileEntity#<init>(java.io.File) +org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream) +org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long) +org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[]) +org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int) +org.apache.http.nio.entity.NFileEntity#<init>(java.io.File) +org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String) +org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String) + +@defaultMessage Use non-deprecated constructors +org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String) +org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean) +org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String) +org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String) + +@defaultMessage BasicEntity is easy to mess up and forget to set content type +org.apache.http.entity.BasicHttpEntity#<init>() + +@defaultMessage EntityTemplate is easy to mess up and forget to set content type +org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer) + +@defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type +org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable) diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java index 1e09e890a0b..77e7cdab937 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -53,6 +53,6 @@ public class TransportNoopSearchAction extends HandledTransportAction request = bulkRequest.requests().get(i); + + DocWriteRequest.OpType opType = request.opType(); + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType); + + } else if (opType ==
DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) request; + if (updateRequest.doc() != null) { + bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); + } + if (updateRequest.upsertRequest() != null) { + bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType); + } + } + } + + if (bulkContentType == null) { + bulkContentType = XContentType.JSON; + } + + byte separator = bulkContentType.xContent().streamSeparator(); + ContentType requestContentType = ContentType.create(bulkContentType.mediaType()); + + ByteArrayOutputStream content = new ByteArrayOutputStream(); + for (DocWriteRequest request : bulkRequest.requests()) { + DocWriteRequest.OpType opType = request.opType(); + + try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { + metadata.startObject(); + { + metadata.startObject(opType.getLowercase()); + if (Strings.hasLength(request.index())) { + metadata.field("_index", request.index()); + } + if (Strings.hasLength(request.type())) { + metadata.field("_type", request.type()); + } + if (Strings.hasLength(request.id())) { + metadata.field("_id", request.id()); + } + if (Strings.hasLength(request.routing())) { + metadata.field("_routing", request.routing()); + } + if (Strings.hasLength(request.parent())) { + metadata.field("_parent", request.parent()); + } + if (request.version() != Versions.MATCH_ANY) { + metadata.field("_version", request.version()); + } + + VersionType versionType = request.versionType(); + if (versionType != VersionType.INTERNAL) { + if (versionType == VersionType.EXTERNAL) { + metadata.field("_version_type", "external"); + } else if (versionType == VersionType.EXTERNAL_GTE) { + metadata.field("_version_type", "external_gte"); + } else if (versionType == VersionType.FORCE) { + metadata.field("_version_type", "force"); + } + } + + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + IndexRequest indexRequest = (IndexRequest) request; + if (Strings.hasLength(indexRequest.getPipeline())) { + metadata.field("pipeline", indexRequest.getPipeline()); + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) request; + if (updateRequest.retryOnConflict() > 0) { + metadata.field("_retry_on_conflict", updateRequest.retryOnConflict()); + } + if (updateRequest.fetchSource() != null) { + metadata.field("_source", updateRequest.fetchSource()); + } + } + metadata.endObject(); + } + metadata.endObject(); + + BytesRef metadataSource = metadata.bytes().toBytesRef(); + content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length); + content.write(separator); + } + + BytesRef source = null; + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + IndexRequest indexRequest = (IndexRequest) request; + BytesReference indexSource = indexRequest.source(); + XContentType indexXContentType = indexRequest.getContentType(); + + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, indexSource, indexXContentType)) { + try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) { + builder.copyCurrentStructure(parser); + source = builder.bytes().toBytesRef(); + } + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef(); + } + + if (source != null) { + content.write(source.bytes, 
source.offset, source.length); + content.write(separator); + } + } + + HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType); + return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity); } static Request exists(GetRequest getRequest) { @@ -118,6 +263,52 @@ final class Request { return new Request(method, endpoint, parameters.getParams(), entity); } + static Request ping() { + return new Request("HEAD", "/", Collections.emptyMap(), null); + } + + static Request update(UpdateRequest updateRequest) throws IOException { + String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); + + Params parameters = Params.builder(); + parameters.withRouting(updateRequest.routing()); + parameters.withParent(updateRequest.parent()); + parameters.withTimeout(updateRequest.timeout()); + parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); + parameters.withWaitForActiveShards(updateRequest.waitForActiveShards()); + parameters.withDocAsUpsert(updateRequest.docAsUpsert()); + parameters.withFetchSourceContext(updateRequest.fetchSource()); + parameters.withRetryOnConflict(updateRequest.retryOnConflict()); + parameters.withVersion(updateRequest.version()); + parameters.withVersionType(updateRequest.versionType()); + + // The Java API allows update requests with different content types + // set for the partial document and the upsert document. This client + // only accepts update requests that have the same content types set + // for both doc and upsert. + XContentType xContentType = null; + if (updateRequest.doc() != null) { + xContentType = updateRequest.doc().getContentType(); + } + if (updateRequest.upsertRequest() != null) { + XContentType upsertContentType = updateRequest.upsertRequest().getContentType(); + if ((xContentType != null) && (xContentType != upsertContentType)) { + throw new IllegalStateException("Update request cannot have different content types for doc [" + xContentType + "]" + + " and upsert [" + upsertContentType + "] documents"); + } else { + xContentType = upsertContentType; + } + } + if (xContentType == null) { + xContentType = Requests.INDEX_CONTENT_TYPE; + } + + BytesRef source = XContentHelper.toXContent(updateRequest, xContentType, false).toBytesRef(); + HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType())); + + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + /** * Utility method to build request's endpoint. 
*/ @@ -160,6 +351,13 @@ final class Request { return this; } + Params withDocAsUpsert(boolean docAsUpsert) { + if (docAsUpsert) { + return putParam("doc_as_upsert", Boolean.TRUE.toString()); + } + return this; + } + Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { if (fetchSourceContext != null) { if (fetchSourceContext.fetchSource() == false) { @@ -203,7 +401,14 @@ final class Request { Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { - putParam("refresh", refreshPolicy.getValue()); + return putParam("refresh", refreshPolicy.getValue()); + } + return this; + } + + Params withRetryOnConflict(int retryOnConflict) { + if (retryOnConflict > 0) { + return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); } return this; } @@ -252,4 +457,26 @@ return new Params(); } } + + /** + * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms + * to the current {@link BulkRequest}'s content type (if it is known at the time this method is called). + * + * @return the {@link IndexRequest}'s content type + */ + static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { + XContentType requestContentType = indexRequest.getContentType(); + if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) { + throw new IllegalArgumentException("Unsupported content-type found for request with content-type [" + requestContentType + + "], only JSON and SMILE are supported"); + } + if (xContentType == null) { + return requestContentType; + } + if (requestContentType != xContentType) { + throw new IllegalArgumentException("Mismatching content-type found for request with content-type [" + requestContentType + + "], previous requests have content-type [" + xContentType + "]"); + } + return xContentType; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 8e29ff7a5a1..70e19f2ddcb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,11 +26,17 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -39,24 +45,64 @@ import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; import
java.util.Collections; +import java.util.List; import java.util.Objects; import java.util.Set; import java.util.function.Function; +import java.util.stream.Stream; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static java.util.stream.Collectors.toList; /** * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. * The provided {@link RestClient} is externally built and closed. + * Can be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins, or to + * add support for custom response sections, again added to Elasticsearch through plugins. */ public class RestHighLevelClient { private final RestClient client; + private final NamedXContentRegistry registry; - public RestHighLevelClient(RestClient client) { - this.client = Objects.requireNonNull(client); + /** + * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests. + */ + public RestHighLevelClient(RestClient restClient) { + this(restClient, Collections.emptyList()); + } + + /** + * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and + * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. + */ + protected RestHighLevelClient(RestClient restClient, List namedXContentEntries) { + this.client = Objects.requireNonNull(restClient); + this.registry = new NamedXContentRegistry(Stream.of( + getNamedXContents().stream(), + namedXContentEntries.stream() + ).flatMap(Function.identity()).collect(toList())); + } + + /** + * Executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + */ + public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + */ + public void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); } /** @@ -121,14 +167,55 @@ public class RestHighLevelClient { performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers); } - private Resp performRequestAndParseEntity(Req request, Function requestConverter, - CheckedFunction entityParser, Set ignores, Header... headers) throws IOException { + /** + * Updates a document using the Update API + *

+ * See Update API on elastic.co + */ + public UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously updates a document using the Update API + *

+ * See Update API on elastic.co + */ + public void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers); + } + + /** + * Deletes a document by id using the Delete api + * + * See Delete API on elastic.co + */ + public DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404), + headers); + } + + /** + * Asynchronously deletes a document by id using the Delete api + * + * See Delete API on elastic.co + */ + public void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener, + Collections.singleton(404), headers); + } + + private Resp performRequestAndParseEntity(Req request, + CheckedFunction requestConverter, + CheckedFunction entityParser, + Set ignores, Header... headers) throws IOException { return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); } - Resp performRequest(Req request, Function requestConverter, - CheckedFunction responseConverter, Set ignores, Header... headers) throws IOException { - + Resp performRequest(Req request, + CheckedFunction requestConverter, + CheckedFunction responseConverter, + Set ignores, Header... headers) throws IOException { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { throw validationException; @@ -154,27 +241,36 @@ public class RestHighLevelClient { } } - private void performRequestAsyncAndParseEntity(Req request, Function requestConverter, - CheckedFunction entityParser, ActionListener listener, - Set ignores, Header... headers) { + private void performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + CheckedFunction entityParser, + ActionListener listener, Set ignores, Header... headers) { performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), listener, ignores, headers); } - void performRequestAsync(Req request, Function requestConverter, - CheckedFunction responseConverter, ActionListener listener, - Set ignores, Header... headers) { + void performRequestAsync(Req request, + CheckedFunction requestConverter, + CheckedFunction responseConverter, + ActionListener listener, Set ignores, Header... 
headers) { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); return; } - Request req = requestConverter.apply(request); + Request req; + try { + req = requestConverter.apply(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } + ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req.method, req.endpoint, req.params, req.entity, responseListener, headers); } - static ResponseListener wrapResponseListener(CheckedFunction responseConverter, + ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { @Override @@ -219,7 +315,7 @@ public class RestHighLevelClient { * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing. */ - static ElasticsearchStatusException parseResponseException(ResponseException responseException) { + ElasticsearchStatusException parseResponseException(ResponseException responseException) { Response response = responseException.getResponse(); HttpEntity entity = response.getEntity(); ElasticsearchStatusException elasticsearchException; @@ -239,7 +335,7 @@ public class RestHighLevelClient { return elasticsearchException; } - static Resp parseEntity( + Resp parseEntity( HttpEntity entity, CheckedFunction entityParser) throws IOException { if (entity == null) { throw new IllegalStateException("Response body expected but not returned"); @@ -251,7 +347,7 @@ public class RestHighLevelClient { if (xContentType == null) { throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); } - try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, entity.getContent())) { + try (XContentParser parser = xContentType.xContent().createParser(registry, entity.getContent())) { return entityParser.apply(parser); } } @@ -259,4 +355,10 @@ public class RestHighLevelClient { static boolean convertExistsResponse(Response response) { return response.getStatusLine().getStatusCode() == 200; } + + static List getNamedXContents() { + List namedXContents = new ArrayList<>(); + //namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField("sterms"), StringTerms::fromXContent)); + return namedXContents; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index a7989fe63ae..346d7d7c756 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -25,26 +25,112 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import 
org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.get.GetResult; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.containsString; +import static java.util.Collections.singletonMap; public class CrudIT extends ESRestHighLevelClientTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/23196") + public void testDelete() throws IOException { + { + // Testing non existing document + String docId = "does_not_exist"; + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + assertEquals("index", deleteResponse.getIndex()); + assertEquals("type", deleteResponse.getType()); + assertEquals(docId, deleteResponse.getId()); + assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); + } + { + // Testing deletion + String docId = "id"; + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); + if (randomBoolean()) { + deleteRequest.version(1L); + } + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + assertEquals("index", deleteResponse.getIndex()); + assertEquals("type", deleteResponse.getType()); + assertEquals(docId, deleteResponse.getId()); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + } + { + // Testing version conflict + String docId = "version_conflict"; + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).version(2); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); + assertEquals(RestStatus.CONFLICT, exception.status()); + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + docId + "]: " + + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); + assertEquals("index", exception.getMetadata("es.index").get(0)); + } + { + // Testing version type + String docId = "version_type"; + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12)); + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(13); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + assertEquals("index", 
deleteResponse.getIndex()); + assertEquals("type", deleteResponse.getType()); + assertEquals(docId, deleteResponse.getId()); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + } + { + // Testing version type with a wrong version + String docId = "wrong_version"; + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12)); + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(10); + execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + }); + assertEquals(RestStatus.CONFLICT, exception.status()); + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + + docId + "]: version conflict, current version [12] is higher or equal to the one provided [10]]", exception.getMessage()); + assertEquals("index", exception.getMetadata("es.index").get(0)); + } + { + // Testing routing + String docId = "routing"; + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo")); + DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).routing("foo"); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + assertEquals("index", deleteResponse.getIndex()); + assertEquals("type", deleteResponse.getType()); + assertEquals(docId, deleteResponse.getId()); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + } + } + public void testExists() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); @@ -64,10 +150,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1); - ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); - assertEquals(RestStatus.BAD_REQUEST, exception.status()); - assertThat(exception.getMessage(), containsString("/index/type/does_not_exist?version=1: HTTP/1.1 400 Bad Request")); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } } @@ -266,4 +349,253 @@ public class CrudIT extends ESRestHighLevelClientTestCase { "version conflict, document already exists (current version [1])]", exception.getMessage()); } } + + public void testUpdate() throws IOException { + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "does_not_exist"); + updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]", + exception.getMessage()); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + indexRequest.source(singletonMap("field", "value")); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + + 
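// Partial-document update: the fields given to doc() are merged into the existing
+ // source and, as the assertion below shows, the document version is bumped by one.
+ 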
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion()); + + UpdateRequest updateRequestConflict = new UpdateRequest("index", "type", "id"); + updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values())); + updateRequestConflict.version(indexResponse.getVersion()); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> + execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync)); + assertEquals(RestStatus.CONFLICT, exception.status()); + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " + + "current version [2] is different than the one provided [1]]", exception.getMessage()); + } + { + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); + if (randomBoolean()) { + updateRequest.parent("missing"); + } else { + updateRequest.routing("missing"); + } + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + }); + + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][id]: document missing]", + exception.getMessage()); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "with_script"); + indexRequest.source(singletonMap("counter", 12)); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script"); + Script script = new Script(ScriptType.INLINE, "painless", "ctx._source.counter += params.count", singletonMap("count", 8)); + updateRequest.script(script); + updateRequest.fetchSource(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(2L, updateResponse.getVersion()); + assertEquals(20, updateResponse.getGetResult().sourceAsMap().get("counter")); + + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "with_doc"); + indexRequest.source("field_1", "one", "field_3", "three"); + indexRequest.version(12L); + indexRequest.versionType(VersionType.EXTERNAL); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + assertEquals(12L, indexResponse.getVersion()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc"); + updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values())); + updateRequest.fetchSource("field_*", "field_3"); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + 
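// fetchSource("field_*", "field_3") asks for the updated source back, filtered to the
+ // "field_*" includes minus the "field_3" exclude, which the assertions below verify.
+ 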
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(13L, updateResponse.getVersion()); + GetResult getResult = updateResponse.getGetResult(); + assertEquals(13L, updateResponse.getVersion()); + Map sourceAsMap = getResult.sourceAsMap(); + assertEquals("one", sourceAsMap.get("field_1")); + assertEquals("two", sourceAsMap.get("field_2")); + assertFalse(sourceAsMap.containsKey("field_3")); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "noop"); + indexRequest.source("field", "value"); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + assertEquals(1L, indexResponse.getVersion()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop"); + updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); + assertEquals(1L, updateResponse.getVersion()); + + updateRequest.detectNoop(false); + + updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(2L, updateResponse.getVersion()); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_upsert"); + updateRequest.upsert(singletonMap("doc_status", "created")); + updateRequest.doc(singletonMap("doc_status", "updated")); + updateRequest.fetchSource(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); + assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + assertEquals("with_upsert", updateResponse.getId()); + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("created", getResult.sourceAsMap().get("doc_status")); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc_as_upsert"); + updateRequest.doc(singletonMap("field", "initialized")); + updateRequest.fetchSource(true); + updateRequest.docAsUpsert(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); + assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + assertEquals("with_doc_as_upsert", updateResponse.getId()); + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("initialized", getResult.sourceAsMap().get("field")); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_scripted_upsert"); + updateRequest.fetchSource(true); + updateRequest.script(new Script(ScriptType.INLINE, "painless", "ctx._source.level = params.test", singletonMap("test", "C"))); + updateRequest.scriptedUpsert(true); + updateRequest.upsert(singletonMap("level", "A")); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); 
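+ // With scriptedUpsert(true) the script runs even though the document does not exist
+ // yet, so the upsert source {"level": "A"} ends up rewritten to "C" before indexing.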
+ assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + assertEquals("with_scripted_upsert", updateResponse.getId()); + + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("C", getResult.sourceAsMap().get("level")); + } + { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML)); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + }); + assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", + exception.getMessage()); + } + } + + public void testBulk() throws IOException { + int nbItems = randomIntBetween(10, 100); + boolean[] errors = new boolean[nbItems]; + + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < nbItems; i++) { + String id = String.valueOf(i); + boolean erroneous = randomBoolean(); + errors[i] = erroneous; + + DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); + if (opType == DocWriteRequest.OpType.DELETE) { + if (erroneous == false) { + assertEquals(RestStatus.CREATED, + highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + } + DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); + bulkRequest.add(deleteRequest); + + } else { + BytesReference source = XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject().bytes(); + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = new IndexRequest("index", "test", id).source(source, xContentType); + if (erroneous) { + indexRequest.version(12L); + } + bulkRequest.add(indexRequest); + + } else if (opType == DocWriteRequest.OpType.CREATE) { + IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true); + if (erroneous) { + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + } + bulkRequest.add(createRequest); + + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = new UpdateRequest("index", "test", id) + .doc(new IndexRequest().source(source, xContentType)); + if (erroneous == false) { + assertEquals(RestStatus.CREATED, + highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + } + bulkRequest.add(updateRequest); + } + } + } + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync); + assertEquals(RestStatus.OK, bulkResponse.status()); + assertTrue(bulkResponse.getTookInMillis() > 0); + assertEquals(nbItems, bulkResponse.getItems().length); + + for (int i = 0; i < nbItems; i++) { + BulkItemResponse bulkItemResponse = bulkResponse.getItems()[i]; + + assertEquals(i, bulkItemResponse.getItemId()); + assertEquals("index", bulkItemResponse.getIndex()); + assertEquals("test", bulkItemResponse.getType()); + assertEquals(String.valueOf(i), bulkItemResponse.getId()); + + DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType(); + if (requestOpType == 
DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) { + assertEquals(errors[i], bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.CREATED, bulkItemResponse.status()); + } else if (requestOpType == DocWriteRequest.OpType.UPDATE) { + assertEquals(errors[i], bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.OK, bulkItemResponse.status()); + } else if (requestOpType == DocWriteRequest.OpType.DELETE) { + assertFalse(bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status()); + } + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index f9bf4cc1a39..62bb6b551af 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -22,25 +22,40 @@ package org.elasticsearch.client; import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.script.Script; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.function.Consumer; import java.util.function.Function; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.client.Request.enforceSameContentType; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + public class RequestTests extends ESTestCase { public void testPing() { @@ -55,6 +70,39 @@ public class RequestTests extends ESTestCase { getAndExistsTest(Request::get, "GET"); } + public void testDelete() throws IOException { + String index = randomAsciiOfLengthBetween(3, 10); + String type = randomAsciiOfLengthBetween(3, 10); + String id = randomAsciiOfLengthBetween(3, 10); + DeleteRequest deleteRequest = new DeleteRequest(index, type, id); + + Map expectedParams = new HashMap<>(); + + setRandomTimeout(deleteRequest, expectedParams); + setRandomRefreshPolicy(deleteRequest, expectedParams); + setRandomVersion(deleteRequest, expectedParams); + setRandomVersionType(deleteRequest, expectedParams); + + if (frequently()) { + if 
(randomBoolean()) { + String routing = randomAsciiOfLengthBetween(3, 10); + deleteRequest.routing(routing); + expectedParams.put("routing", routing); + } + if (randomBoolean()) { + String parent = randomAsciiOfLengthBetween(3, 10); + deleteRequest.parent(parent); + expectedParams.put("parent", parent); + } + } + + Request request = Request.delete(deleteRequest); + assertEquals("/" + index + "/" + type + "/" + id, request.endpoint); + assertEquals(expectedParams, request.params); + assertEquals("DELETE", request.method); + assertNull(request.entity); + } + public void testExists() { getAndExistsTest(Request::exists, "HEAD"); } @@ -121,43 +169,7 @@ public class RequestTests extends ESTestCase { expectedParams.put("stored_fields", storedFieldsParam.toString()); } if (randomBoolean()) { - if (randomBoolean()) { - boolean fetchSource = randomBoolean(); - getRequest.fetchSourceContext(new FetchSourceContext(fetchSource)); - if (fetchSource == false) { - expectedParams.put("_source", "false"); - } - } else { - int numIncludes = randomIntBetween(0, 5); - String[] includes = new String[numIncludes]; - StringBuilder includesParam = new StringBuilder(); - for (int i = 0; i < numIncludes; i++) { - String include = randomAsciiOfLengthBetween(3, 10); - includes[i] = include; - includesParam.append(include); - if (i < numIncludes - 1) { - includesParam.append(","); - } - } - if (numIncludes > 0) { - expectedParams.put("_source_include", includesParam.toString()); - } - int numExcludes = randomIntBetween(0, 5); - String[] excludes = new String[numExcludes]; - StringBuilder excludesParam = new StringBuilder(); - for (int i = 0; i < numExcludes; i++) { - String exclude = randomAsciiOfLengthBetween(3, 10); - excludes[i] = exclude; - excludesParam.append(exclude); - if (i < numExcludes - 1) { - excludesParam.append(","); - } - } - if (numExcludes > 0) { - expectedParams.put("_source_exclude", excludesParam.toString()); - } - getRequest.fetchSourceContext(new FetchSourceContext(true, includes, excludes)); - } + randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams); } } Request request = requestConverter.apply(getRequest); @@ -185,33 +197,16 @@ public class RequestTests extends ESTestCase { } } + setRandomTimeout(indexRequest, expectedParams); + setRandomRefreshPolicy(indexRequest, expectedParams); + // There is some logic around _create endpoint and version/version type if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { indexRequest.version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED)); expectedParams.put("version", Long.toString(Versions.MATCH_DELETED)); } else { - if (randomBoolean()) { - long version = randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, Versions.NOT_FOUND, randomNonNegativeLong()); - indexRequest.version(version); - if (version != Versions.MATCH_ANY) { - expectedParams.put("version", Long.toString(version)); - } - } - if (randomBoolean()) { - VersionType versionType = randomFrom(VersionType.values()); - indexRequest.versionType(versionType); - if (versionType != VersionType.INTERNAL) { - expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT)); - } - } - } - - if (randomBoolean()) { - String timeout = randomTimeValue(); - indexRequest.timeout(timeout); - expectedParams.put("timeout", timeout); - } else { - expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep()); + setRandomVersion(indexRequest, expectedParams); + setRandomVersionType(indexRequest, expectedParams); } if (frequently()) { @@ 
-230,14 +225,6 @@ public class RequestTests extends ESTestCase { indexRequest.setPipeline(pipeline); expectedParams.put("pipeline", pipeline); } - - if (randomBoolean()) { - WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); - indexRequest.setRefreshPolicy(refreshPolicy); - if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { - expectedParams.put("refresh", refreshPolicy.getValue()); - } - } } XContentType xContentType = randomFrom(XContentType.values()); @@ -271,6 +258,325 @@ public class RequestTests extends ESTestCase { } } + public void testUpdate() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + + Map expectedParams = new HashMap<>(); + String index = randomAsciiOfLengthBetween(3, 10); + String type = randomAsciiOfLengthBetween(3, 10); + String id = randomAsciiOfLengthBetween(3, 10); + + UpdateRequest updateRequest = new UpdateRequest(index, type, id); + updateRequest.detectNoop(randomBoolean()); + + if (randomBoolean()) { + BytesReference source = RandomObjects.randomSource(random(), xContentType); + updateRequest.doc(new IndexRequest().source(source, xContentType)); + + boolean docAsUpsert = randomBoolean(); + updateRequest.docAsUpsert(docAsUpsert); + if (docAsUpsert) { + expectedParams.put("doc_as_upsert", "true"); + } + } else { + updateRequest.script(new Script("_value + 1")); + updateRequest.scriptedUpsert(randomBoolean()); + } + if (randomBoolean()) { + BytesReference source = RandomObjects.randomSource(random(), xContentType); + updateRequest.upsert(new IndexRequest().source(source, xContentType)); + } + if (randomBoolean()) { + String routing = randomAsciiOfLengthBetween(3, 10); + updateRequest.routing(routing); + expectedParams.put("routing", routing); + } + if (randomBoolean()) { + String parent = randomAsciiOfLengthBetween(3, 10); + updateRequest.parent(parent); + expectedParams.put("parent", parent); + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + updateRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + } else { + expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep()); + } + if (randomBoolean()) { + WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + updateRequest.setRefreshPolicy(refreshPolicy); + if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { + expectedParams.put("refresh", refreshPolicy.getValue()); + } + } + if (randomBoolean()) { + int waitForActiveShards = randomIntBetween(0, 10); + updateRequest.waitForActiveShards(waitForActiveShards); + expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards)); + } + if (randomBoolean()) { + long version = randomLong(); + updateRequest.version(version); + if (version != Versions.MATCH_ANY) { + expectedParams.put("version", Long.toString(version)); + } + } + if (randomBoolean()) { + VersionType versionType = randomFrom(VersionType.values()); + updateRequest.versionType(versionType); + if (versionType != VersionType.INTERNAL) { + expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT)); + } + } + if (randomBoolean()) { + int retryOnConflict = randomIntBetween(0, 5); + updateRequest.retryOnConflict(retryOnConflict); + if (retryOnConflict > 0) { + expectedParams.put("retry_on_conflict", String.valueOf(retryOnConflict)); + } + } + if (randomBoolean()) { + randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams); + } + + Request request = Request.update(updateRequest); + 
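// Request.update maps the request onto POST /{index}/{type}/{id}/_update, carrying
+ // every non-default option as a query string parameter, as asserted below.
+ 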
assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.endpoint); + assertEquals(expectedParams, request.params); + assertEquals("POST", request.method); + + HttpEntity entity = request.entity; + assertNotNull(entity); + assertTrue(entity instanceof ByteArrayEntity); + + UpdateRequest parsedUpdateRequest = new UpdateRequest(); + + XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { + parsedUpdateRequest.fromXContent(parser); + } + + assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert()); + assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert()); + assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop()); + assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource()); + assertEquals(updateRequest.script(), parsedUpdateRequest.script()); + if (updateRequest.doc() != null) { + assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType); + } else { + assertNull(parsedUpdateRequest.doc()); + } + if (updateRequest.upsertRequest() != null) { + assertToXContentEquivalent(updateRequest.upsertRequest().source(), parsedUpdateRequest.upsertRequest().source(), xContentType); + } else { + assertNull(parsedUpdateRequest.upsertRequest()); + } + } + + public void testUpdateWithDifferentContentTypes() throws IOException { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML)); + Request.update(updateRequest); + }); + assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", + exception.getMessage()); + } + + public void testBulk() throws IOException { + Map expectedParams = new HashMap<>(); + + BulkRequest bulkRequest = new BulkRequest(); + if (randomBoolean()) { + String timeout = randomTimeValue(); + bulkRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + } else { + expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep()); + } + + if (randomBoolean()) { + WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + bulkRequest.setRefreshPolicy(refreshPolicy); + if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { + expectedParams.put("refresh", refreshPolicy.getValue()); + } + } + + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + + int nbItems = randomIntBetween(10, 100); + for (int i = 0; i < nbItems; i++) { + String index = randomAsciiOfLength(5); + String type = randomAsciiOfLength(5); + String id = randomAsciiOfLength(5); + + BytesReference source = RandomObjects.randomSource(random(), xContentType); + DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); + + DocWriteRequest docWriteRequest = null; + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType); + docWriteRequest = indexRequest; + if (randomBoolean()) { + indexRequest.setPipeline(randomAsciiOfLength(5)); + } + if (randomBoolean()) { + indexRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == 
DocWriteRequest.OpType.CREATE) { + IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true); + docWriteRequest = createRequest; + if (randomBoolean()) { + createRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType)); + docWriteRequest = updateRequest; + if (randomBoolean()) { + updateRequest.retryOnConflict(randomIntBetween(1, 5)); + } + if (randomBoolean()) { + randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>()); + } + if (randomBoolean()) { + updateRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == DocWriteRequest.OpType.DELETE) { + docWriteRequest = new DeleteRequest(index, type, id); + } + + if (randomBoolean()) { + docWriteRequest.routing(randomAsciiOfLength(10)); + } + if (randomBoolean()) { + docWriteRequest.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + docWriteRequest.versionType(randomFrom(VersionType.values())); + } + bulkRequest.add(docWriteRequest); + } + + Request request = Request.bulk(bulkRequest); + assertEquals("/_bulk", request.endpoint); + assertEquals(expectedParams, request.params); + assertEquals("POST", request.method); + + byte[] content = new byte[(int) request.entity.getContentLength()]; + try (InputStream inputStream = request.entity.getContent()) { + Streams.readFully(inputStream, content); + } + + BulkRequest parsedBulkRequest = new BulkRequest(); + parsedBulkRequest.add(content, 0, content.length, xContentType); + assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions()); + + for (int i = 0; i < bulkRequest.numberOfActions(); i++) { + DocWriteRequest originalRequest = bulkRequest.requests().get(i); + DocWriteRequest parsedRequest = parsedBulkRequest.requests().get(i); + + assertEquals(originalRequest.opType(), parsedRequest.opType()); + assertEquals(originalRequest.index(), parsedRequest.index()); + assertEquals(originalRequest.type(), parsedRequest.type()); + assertEquals(originalRequest.id(), parsedRequest.id()); + assertEquals(originalRequest.routing(), parsedRequest.routing()); + assertEquals(originalRequest.parent(), parsedRequest.parent()); + assertEquals(originalRequest.version(), parsedRequest.version()); + assertEquals(originalRequest.versionType(), parsedRequest.versionType()); + + DocWriteRequest.OpType opType = originalRequest.opType(); + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = (IndexRequest) originalRequest; + IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest; + + assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline()); + assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType); + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) originalRequest; + UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest; + + assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict()); + assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource()); + if (updateRequest.doc() != null) { + assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType); + } else { + assertNull(parsedUpdateRequest.doc()); + } + } + } + } + + public void testBulkWithDifferentContentTypes() throws IOException { + { + BulkRequest bulkRequest = 
new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new UpdateRequest("index", "type", "1").script(new Script("test"))); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + + Request request = Request.bulk(bulkRequest); + assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue()); + } + { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType)); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + + Request request = Request.bulk(bulkRequest); + assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + } + { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + UpdateRequest updateRequest = new UpdateRequest("index", "type", "0"); + if (randomBoolean()) { + updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), xContentType)); + } else { + updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType)); + } + + Request request = Request.bulk(new BulkRequest().add(updateRequest)); + assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + } + { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + assertEquals("Mismatching content-type found for request with content-type [JSON], " + + "previous requests have content-type [SMILE]", exception.getMessage()); + } + { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("index", "type", "0") + .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1") + .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new UpdateRequest("index", "type", "2") + .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) + .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) + ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + assertEquals("Mismatching content-type found for request with content-type [SMILE], " + + "previous requests have content-type [JSON]", exception.getMessage()); + } + { + XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + bulkRequest.add(new DeleteRequest("index", "type", "3")); + bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () 
-> Request.bulk(bulkRequest)); + assertEquals("Unsupported content-type found for request with content-type [" + xContentType + + "], only JSON and SMILE are supported", exception.getMessage()); + } + } + public void testParams() { final int nbParams = randomIntBetween(0, 10); Request.Params params = Request.Params.builder(); @@ -306,5 +612,117 @@ public class RequestTests extends ESTestCase { assertEquals("/a/b", Request.endpoint("a", "b")); assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create")); assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create")); + assertEquals("/a/_create", Request.endpoint("a", null, null, "_create")); } -} \ No newline at end of file + + public void testEnforceSameContentType() { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType); + assertEquals(xContentType, enforceSameContentType(indexRequest, null)); + assertEquals(xContentType, enforceSameContentType(indexRequest, xContentType)); + + XContentType bulkContentType = randomBoolean() ? xContentType : null; + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType)); + assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported", + exception.getMessage()); + + exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType)); + assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported", + exception.getMessage()); + + XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON; + + exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); + assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], " + + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); + } + + /** + * Randomize the {@link FetchSourceContext} request parameters. 
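+ * Depending on the random draw, this sets either a plain {@code _source} flag or
+ * {@code _source_include}/{@code _source_exclude} lists, recording the query
+ * parameters the request converters are expected to emit.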
+ */ + private static void randomizeFetchSourceContextParams(Consumer consumer, Map expectedParams) { + if (randomBoolean()) { + if (randomBoolean()) { + boolean fetchSource = randomBoolean(); + consumer.accept(new FetchSourceContext(fetchSource)); + if (fetchSource == false) { + expectedParams.put("_source", "false"); + } + } else { + int numIncludes = randomIntBetween(0, 5); + String[] includes = new String[numIncludes]; + StringBuilder includesParam = new StringBuilder(); + for (int i = 0; i < numIncludes; i++) { + String include = randomAsciiOfLengthBetween(3, 10); + includes[i] = include; + includesParam.append(include); + if (i < numIncludes - 1) { + includesParam.append(","); + } + } + if (numIncludes > 0) { + expectedParams.put("_source_include", includesParam.toString()); + } + int numExcludes = randomIntBetween(0, 5); + String[] excludes = new String[numExcludes]; + StringBuilder excludesParam = new StringBuilder(); + for (int i = 0; i < numExcludes; i++) { + String exclude = randomAsciiOfLengthBetween(3, 10); + excludes[i] = exclude; + excludesParam.append(exclude); + if (i < numExcludes - 1) { + excludesParam.append(","); + } + } + if (numExcludes > 0) { + expectedParams.put("_source_exclude", excludesParam.toString()); + } + consumer.accept(new FetchSourceContext(true, includes, excludes)); + } + } + } + + private static void setRandomTimeout(ReplicationRequest request, Map expectedParams) { + if (randomBoolean()) { + String timeout = randomTimeValue(); + request.timeout(timeout); + expectedParams.put("timeout", timeout); + } else { + expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep()); + } + } + + private static void setRandomRefreshPolicy(ReplicatedWriteRequest request, Map expectedParams) { + if (randomBoolean()) { + WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + request.setRefreshPolicy(refreshPolicy); + if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { + expectedParams.put("refresh", refreshPolicy.getValue()); + } + } + } + + private static void setRandomVersion(DocWriteRequest request, Map expectedParams) { + if (randomBoolean()) { + long version = randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, Versions.NOT_FOUND, randomNonNegativeLong()); + request.version(version); + if (version != Versions.MATCH_ANY) { + expectedParams.put("version", Long.toString(version)); + } + } + } + + private static void setRandomVersionType(DocWriteRequest request, Map expectedParams) { + if (randomBoolean()) { + VersionType versionType = randomFrom(VersionType.values()); + request.versionType(versionType); + if (versionType != VersionType.INTERNAL) { + expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT)); + } + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java new file mode 100644 index 00000000000..cb32f9ae9dd --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.mockito.Mockito.mock; + +/** + * This test works against a {@link RestHighLevelClient} subclass that simulates how custom response sections returned by + * Elasticsearch plugins can be parsed using the high level client. + */ +public class RestHighLevelClientExtTests extends ESTestCase { + + private RestHighLevelClient restHighLevelClient; + + @Before + public void initClient() throws IOException { + RestClient restClient = mock(RestClient.class); + restHighLevelClient = new RestHighLevelClientExt(restClient); + } + + public void testParseEntityCustomResponseSection() throws IOException { + { + HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); + BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); + assertThat(customSection, instanceOf(CustomResponseSection1.class)); + CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection; + assertEquals("value", customResponseSection1.value); + } + { + HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); + BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); + assertThat(customSection, instanceOf(CustomResponseSection2.class)); + CustomResponseSection2 customResponseSection2 = (CustomResponseSection2) customSection; + assertArrayEquals(new String[]{"item1", "item2"}, customResponseSection2.values); + } + } + + private static class RestHighLevelClientExt extends RestHighLevelClient { + + private RestHighLevelClientExt(RestClient restClient) { + super(restClient, getNamedXContentsExt()); + } + + private static List getNamedXContentsExt() { + List entries = new ArrayList<>(); + entries.add(new NamedXContentRegistry.Entry(BaseCustomResponseSection.class, new ParseField("custom1"), + CustomResponseSection1::fromXContent)); + entries.add(new NamedXContentRegistry.Entry(BaseCustomResponseSection.class, new ParseField("custom2"), + CustomResponseSection2::fromXContent)); + return entries; + } + } + + private abstract static class BaseCustomResponseSection { + + static BaseCustomResponseSection fromXContent(XContentParser parser) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + BaseCustomResponseSection custom = parser.namedObject(BaseCustomResponseSection.class,
parser.currentName(), null); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + return custom; + } + } + + private static class CustomResponseSection1 extends BaseCustomResponseSection { + + private final String value; + + private CustomResponseSection1(String value) { + this.value = value; + } + + static CustomResponseSection1 fromXContent(XContentParser parser) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("field", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + CustomResponseSection1 responseSection1 = new CustomResponseSection1(parser.text()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + return responseSection1; + } + } + + private static class CustomResponseSection2 extends BaseCustomResponseSection { + + private final String[] values; + + private CustomResponseSection2(String[] values) { + this.values = values; + } + + static CustomResponseSection2 fromXContent(XContentParser parser) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("array", parser.currentName()); + assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); + List values = new ArrayList<>(); + while(parser.nextToken().isValue()) { + values.add(parser.text()); + } + assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); + CustomResponseSection2 responseSection2 = new CustomResponseSection2(values.toArray(new String[values.size()])); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + return responseSection2; + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 50229a5d916..897b1f55466 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -27,7 +27,6 @@ import org.apache.http.HttpResponse; import org.apache.http.ProtocolVersion; import org.apache.http.RequestLine; import org.apache.http.StatusLine; -import org.apache.http.entity.BasicHttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -40,6 +39,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.main.MainRequest; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.cbor.CborXContent; @@ -55,9 +55,9 @@ import org.mockito.internal.matchers.VarargMatcher; import java.io.IOException; import java.net.SocketTimeoutException; import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Matchers.anyMapOf; @@ -139,17 +139,17 @@ public class RestHighLevelClientTests extends 
ESTestCase { public void testParseEntity() throws IOException { { - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> RestHighLevelClient.parseEntity(null, null)); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(null, null)); assertEquals("Response body expected but not returned", ise.getMessage()); } { IllegalStateException ise = expectThrows(IllegalStateException.class, - () -> RestHighLevelClient.parseEntity(new BasicHttpEntity(), null)); + () -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null)); assertEquals("Elasticsearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage()); } { StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> RestHighLevelClient.parseEntity(entity, null)); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null)); assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage()); } { @@ -162,13 +162,13 @@ public class RestHighLevelClientTests extends ESTestCase { return value; }; HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); - assertEquals("value", RestHighLevelClient.parseEntity(jsonEntity, entityParser)); + assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser)); HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml")); - assertEquals("value", RestHighLevelClient.parseEntity(yamlEntity, entityParser)); + assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser)); HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile")); - assertEquals("value", RestHighLevelClient.parseEntity(smileEntity, entityParser)); + assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser)); HttpEntity cborEntity = createBinaryEntity(CborXContent.contentBuilder(), ContentType.create("application/cbor")); - assertEquals("value", RestHighLevelClient.parseEntity(cborEntity, entityParser)); + assertEquals("value", restHighLevelClient.parseEntity(cborEntity, entityParser)); } } @@ -195,7 +195,7 @@ public class RestHighLevelClientTests extends ESTestCase { HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); - ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException); + ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException); assertEquals(responseException.getMessage(), elasticsearchException.getMessage()); assertEquals(restStatus, elasticsearchException.status()); assertSame(responseException, elasticsearchException.getCause()); @@ -207,7 +207,7 @@ public class RestHighLevelClientTests extends ESTestCase { ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); - ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException); + 
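// parseResponseException converts the ResponseException into an
+ // ElasticsearchStatusException, parsing the error body into it when one is present.
+ 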
ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException); assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage()); assertEquals(restStatus, elasticsearchException.status()); assertSame(responseException, elasticsearchException.getSuppressed()[0]); @@ -218,7 +218,7 @@ public class RestHighLevelClientTests extends ESTestCase { httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); - ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException); + ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException); assertEquals("Unable to parse response body", elasticsearchException.getMessage()); assertEquals(restStatus, elasticsearchException.status()); assertSame(responseException, elasticsearchException.getCause()); @@ -230,7 +230,7 @@ public class RestHighLevelClientTests extends ESTestCase { httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); - ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException); + ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException); assertEquals("Unable to parse response body", elasticsearchException.getMessage()); assertEquals(restStatus, elasticsearchException.status()); assertSame(responseException, elasticsearchException.getCause()); @@ -240,7 +240,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -261,7 +262,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -278,7 +280,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + 
new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", @@ -297,7 +300,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); @@ -316,7 +320,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); @@ -335,7 +340,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -348,7 +354,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -364,7 +371,8 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error 
message\",\"status\":404}", ContentType.APPLICATION_JSON)); @@ -383,7 +391,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnSuccess() throws IOException { { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -393,7 +401,7 @@ public class RestHighLevelClientTests extends ESTestCase { } { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> {throw new IllegalStateException();}, trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -408,7 +416,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnException() throws IOException { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); IllegalStateException exception = new IllegalStateException(); responseListener.onFailure(exception); @@ -417,7 +425,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IOException { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -433,7 +441,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOException { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -452,7 +460,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws IOException { { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> 
response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -469,7 +477,7 @@ public class RestHighLevelClientTests extends ESTestCase { } { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet()); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); @@ -488,7 +496,7 @@ public class RestHighLevelClientTests extends ESTestCase { public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOException { TrackingActionListener trackingActionListener = new TrackingActionListener(); - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.singleton(404)); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -503,7 +511,7 @@ public class RestHighLevelClientTests extends ESTestCase { TrackingActionListener trackingActionListener = new TrackingActionListener(); //response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying //to parse a 404 response which contains an error rather than a valid document not found response. - ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404)); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -520,7 +528,7 @@ public class RestHighLevelClientTests extends ESTestCase { TrackingActionListener trackingActionListener = new TrackingActionListener(); //response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying //to parse a 404 response which contains an error rather than a valid document not found response. 
- ResponseListener responseListener = RestHighLevelClient.wrapResponseListener( + ResponseListener responseListener = restHighLevelClient.wrapResponseListener( response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404)); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", @@ -535,6 +543,11 @@ public class RestHighLevelClientTests extends ESTestCase { assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage()); } + public void testNamedXContents() throws IOException { + List namedXContents = RestHighLevelClient.getNamedXContents(); + assertEquals(0, namedXContents.size()); + } + private static class TrackingActionListener implements ActionListener { private final AtomicInteger statusCode = new AtomicInteger(-1); private final AtomicReference exception = new AtomicReference<>(); diff --git a/client/rest/build.gradle b/client/rest/build.gradle index d5d9c9cfbb5..19ec584a103 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -49,8 +49,9 @@ dependencies { } forbiddenApisMain { - //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + //client does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), + PrecommitTasks.getResource('/forbidden/http-signatures.txt')] } forbiddenApisTest { @@ -58,7 +59,8 @@ forbiddenApisTest { bundledSignatures -= 'jdk-non-portable' bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), + PrecommitTasks.getResource('/forbidden/http-signatures.txt')] } dependencyLicenses { diff --git a/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java index 2488ea4b435..85b7090bb94 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client; import org.apache.http.ContentTooLongException; +import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.ProtocolVersion; import org.apache.http.StatusLine; @@ -32,6 +33,8 @@ import org.apache.http.nio.ContentDecoder; import org.apache.http.nio.IOControl; import org.apache.http.protocol.HttpContext; +import java.util.concurrent.atomic.AtomicReference; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; @@ -56,7 +59,7 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new StringEntity("test")); + httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN)); 
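// The request converters above switch from java.util.function.Function to
// CheckedFunction so the conversion lambda can declare the checked IOException
// it may throw. A minimal sketch of such an interface, with the generic
// parameters <MainRequest, Request, IOException> assumed from the call sites:
@FunctionalInterface
interface CheckedFunction<T, R, E extends Exception> {
    R apply(T t) throws E;
}

// Hypothetical converter mirroring the tests above:
CheckedFunction<MainRequest, Request, IOException> requestConverter =
        request -> new Request("GET", "/", Collections.emptyMap(), null);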
//everything goes well consumer.responseReceived(httpResponse); @@ -99,11 +102,17 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); consumer.onResponseReceived(new BasicHttpResponse(statusLine)); - BasicHttpEntity entity = new BasicHttpEntity(); - entity.setContentLength(randomInt(bufferLimit)); + final AtomicReference contentLength = new AtomicReference<>(); + HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) { + @Override + public long getContentLength() { + return contentLength.get(); + } + }; + contentLength.set(randomLong(bufferLimit)); consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); - entity.setContentLength(randomIntBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); + contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); try { consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); } catch(ContentTooLongException e) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 68717dfe223..637e1807d25 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -31,6 +31,7 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpTrace; import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.entity.ContentType; import org.apache.http.entity.InputStreamEntity; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; @@ -71,20 +72,21 @@ public class RequestLoggerTests extends RestClientTestCase { HttpEntity entity; switch(randomIntBetween(0, 4)) { case 0: - entity = new StringEntity(requestBody, StandardCharsets.UTF_8); + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 1: - entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8))); + entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON); break; case 2: - entity = new NStringEntity(requestBody, StandardCharsets.UTF_8); + entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 3: - entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8)); + entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); break; case 4: // Evil entity without a charset - entity = new StringEntity(requestBody, (Charset) null); + entity = new StringEntity(requestBody, ContentType.create("application/json", (Charset) null)); break; default: throw new UnsupportedOperationException(); @@ -122,15 +124,16 @@ public class RequestLoggerTests extends RestClientTestCase { HttpEntity entity; switch(randomIntBetween(0, 2)) { case 0: - entity = new StringEntity(responseBody, StandardCharsets.UTF_8); + entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON); break; case 1: //test a non repeatable entity - entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8))); + entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON); break; case 2: // Evil entity without a 
charset - entity = new StringEntity(responseBody, (Charset) null); + entity = new StringEntity(responseBody, ContentType.create("application/json", (Charset) null)); break; default: throw new UnsupportedOperationException(); diff --git a/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java index 9185222f510..1638693a44f 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java @@ -25,6 +25,7 @@ import org.apache.http.HttpResponse; import org.apache.http.ProtocolVersion; import org.apache.http.RequestLine; import org.apache.http.StatusLine; +import org.apache.http.entity.ContentType; import org.apache.http.entity.InputStreamEntity; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHttpResponse; @@ -52,10 +53,11 @@ public class ResponseExceptionTests extends RestClientTestCase { if (hasBody) { HttpEntity entity; if (getRandom().nextBoolean()) { - entity = new StringEntity(responseBody, StandardCharsets.UTF_8); + entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON); } else { //test a non repeatable entity - entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8))); + entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON); } httpResponse.setEntity(entity); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index e75de2f609c..6d4e3ba4bc8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -28,6 +28,7 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; @@ -249,7 +250,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { private Response bodyTest(final RestClient restClient, final String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; - StringEntity entity = new StringEntity(requestBody); + StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); int statusCode = randomStatusCode(getRandom()); Response esResponse; try { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 69048988ee9..541193c733d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -38,6 +38,7 @@ import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.utils.URIBuilder; import org.apache.http.concurrent.FutureCallback; import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import 
org.apache.http.impl.auth.BasicScheme; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; @@ -293,7 +294,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { */ public void testBody() throws IOException { String body = "{ \"field\": \"value\" }"; - StringEntity entity = new StringEntity(body); + StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON); for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { for (int okStatusCode : getOkStatusCodes()) { Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.emptyMap(), entity); @@ -431,7 +432,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { HttpEntity entity = null; boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean(); if (hasBody) { - entity = new StringEntity(randomAsciiOfLengthBetween(10, 100)); + entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); ((HttpEntityEnclosingRequest) request).setEntity(entity); } diff --git a/core/build.gradle b/core/build.gradle index 6e0b94dd6f9..99da28e2091 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -74,7 +74,7 @@ dependencies { // percentiles aggregation compile 'com.tdunning:t-digest:3.0' // percentile ranks aggregation - compile 'org.hdrhistogram:HdrHistogram:2.1.6' + compile 'org.hdrhistogram:HdrHistogram:2.1.9' // lucene spatial compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional diff --git a/core/licenses/HdrHistogram-2.1.6.jar.sha1 b/core/licenses/HdrHistogram-2.1.6.jar.sha1 deleted file mode 100644 index 26fc16f2e87..00000000000 --- a/core/licenses/HdrHistogram-2.1.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7495feb7f71ee124bd2a7e7d83590e296d71d80e \ No newline at end of file diff --git a/core/licenses/HdrHistogram-2.1.9.jar.sha1 b/core/licenses/HdrHistogram-2.1.9.jar.sha1 new file mode 100644 index 00000000000..2378df07b2c --- /dev/null +++ b/core/licenses/HdrHistogram-2.1.9.jar.sha1 @@ -0,0 +1 @@ +e4631ce165eb400edecfa32e03d3f1be53dee754 \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 1a21082fcbb..c3479ecc0cf 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -304,7 +304,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. */ - protected static void parseInnerToXContent(XContentParser parser, DocWriteResponseBuilder context) throws IOException { + protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); @@ -348,9 +348,11 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr } /** - * {@link DocWriteResponseBuilder} is used to build {@link DocWriteResponse} objects during XContent parsing. + * Base class of all {@link DocWriteResponse} builders.
These {@link DocWriteResponse.Builder} are used during + * xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to + * instantiate the appropriate {@link DocWriteResponse} with the parsed values. */ - public abstract static class DocWriteResponseBuilder { + public abstract static class Builder { protected ShardId shardId = null; protected String type = null; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index f0f8d50b4c1..e60de1e2929 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -139,19 +139,6 @@ public class PutRepositoryRequest extends AcknowledgedRequest - * See repository documentation for more information. - * - * @param source repository-specific snapshot settings - * @return this request - * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection - */ - @Deprecated - public CreateSnapshotRequest settings(String source) { - this.settings = Settings.builder().loadFromSource(source).build(); - return this; - } - /** * Sets repository-specific snapshot settings in JSON or YAML format *
<p>
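// The DocWriteResponse javadoc rewritten earlier in this diff describes the
// parsing flow: a Builder subclass buffers fields while xcontent tokens are
// consumed, then build() produces the concrete response. A simplified sketch
// of that shape (the real base class tracks more state, e.g. the ShardId seen
// in the hunk above):
public abstract static class Builder {
    protected ShardId shardId = null;
    protected String type = null;
    protected String id = null;

    // IndexResponse.Builder, UpdateResponse.Builder and DeleteResponse.Builder
    // each assemble their concrete DocWriteResponse from the collected fields.
    public abstract DocWriteResponse build();
}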
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index d3b5e12351c..4022d0497c0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -141,21 +141,6 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil return this; } - /** - * Sets repository-specific snapshot settings in YAML, JSON or properties format - *
<p>
- * See repository documentation for more information. - * - * @param source repository-specific snapshot settings - * @return this builder - * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection - */ - @Deprecated - public CreateSnapshotRequestBuilder setSettings(String source) { - request.settings(source); - return this; - } - /** * Sets repository-specific snapshot settings in YAML or JSON format *
<p>
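// Every overload removed in this part of the diff auto-detected the content
// type of its String source, and each @deprecated note names a twin that takes
// an explicit XContentType. A before/after sketch of the snapshot-settings
// variant ("compress" and the request variable are illustrative only):
//
//   request.settings("{\"compress\": true}");                  // removed: type was sniffed
request.settings("{\"compress\": true}", XContentType.JSON);   // surviving overload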
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 9d8ed49aaa0..7e34cb5a596 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -312,21 +312,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest - * See repository documentation for more information. - * - * @param source repository-specific snapshot settings - * @return this request - * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection - */ - @Deprecated - public RestoreSnapshotRequest settings(String source) { - this.settings = Settings.builder().loadFromSource(source).build(); - return this; - } - /** * Sets repository-specific restore settings in JSON or YAML format *
<p>
@@ -450,16 +435,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest - * See repository documentation for more information. - * - * @param source repository-specific snapshot settings - * @return this builder - * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection - */ - @Deprecated - public RestoreSnapshotRequestBuilder setSettings(String source) { - request.settings(source); - return this; - } - /** * Sets repository-specific restore settings in JSON or YAML format *
<p>
@@ -263,19 +248,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui return this; } - /** - * Sets index settings that should be added or replaced during restore - * - * @param source index settings - * @return this builder - * @deprecated use {@link #setIndexSettings(String, XContentType)} to avoid content type detection - */ - @Deprecated - public RestoreSnapshotRequestBuilder setIndexSettings(String source) { - request.indexSettings(source); - return this; - } - /** * Sets index settings that should be added or replaced during restore * diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 74885800a74..f6a9e055399 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -45,11 +45,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest return this; } - /** - * The settings to create the index with (either json or yaml format) - * @deprecated use {@link #source(String, XContentType)} instead to avoid content type detection - */ - @Deprecated - public CreateIndexRequest settings(String source) { - this.settings = Settings.builder().loadFromSource(source).build(); - return this; - } - /** * The settings to create the index with (either json or yaml format) */ @@ -215,18 +205,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return this; } - /** - * Adds mapping that will be added when the index gets created. - * - * @param type The mapping type - * @param source The mapping source - * @deprecated use {@link #mapping(String, String, XContentType)} to avoid content type detection - */ - @Deprecated - public CreateIndexRequest mapping(String type, String source) { - return mapping(type, new BytesArray(source), XContentFactory.xContentType(source)); - } - /** * Adds mapping that will be added when the index gets created. * @@ -362,15 +340,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return this; } - /** - * Sets the settings and mappings as a single source. - * @deprecated use {@link #source(String, XContentType)} - */ - @Deprecated - public CreateIndexRequest source(String source) { - return source(new BytesArray(source)); - } - /** * Sets the settings and mappings as a single source. */ @@ -382,16 +351,7 @@ public class CreateIndexRequest extends AcknowledgedRequest * Sets the settings and mappings as a single source. */ public CreateIndexRequest source(XContentBuilder source) { - return source(source.bytes()); - } - - /** - * Sets the settings and mappings as a single source. - * @deprecated use {@link #source(byte[], XContentType)} - */ - @Deprecated - public CreateIndexRequest source(byte[] source) { - return source(source, 0, source.length); + return source(source.bytes(), source.contentType()); } /** @@ -401,15 +361,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return source(source, 0, source.length, xContentType); } - /** - * Sets the settings and mappings as a single source. - * @deprecated use {@link #source(byte[], int, int, XContentType)} - */ - @Deprecated - public CreateIndexRequest source(byte[] source, int offset, int length) { - return source(new BytesArray(source, offset, length)); - } - /** * Sets the settings and mappings as a single source. 
*/ @@ -417,17 +368,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return source(new BytesArray(source, offset, length), xContentType); } - /** - * Sets the settings and mappings as a single source. - * @deprecated use {@link #source(BytesReference, XContentType)} - */ - @Deprecated - public CreateIndexRequest source(BytesReference source) { - XContentType xContentType = XContentFactory.xContentType(source); - source(source, xContentType); - return this; - } - /** * Sets the settings and mappings as a single source. */ diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 237c88244b4..f7cc45511e0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -110,19 +110,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder im } } - /** - * The mapping source definition. - * @deprecated use {@link #source(String, XContentType)} - */ - @Deprecated - public PutMappingRequest source(String mappingSource) { - return source(mappingSource, XContentFactory.xContentType(mappingSource)); - } - /** * The mapping source definition. */ diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 012a593ebc4..43bfe78c487 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -81,16 +81,6 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder itemParser = null; if (opType == OpType.INDEX || opType == OpType.CREATE) { - final IndexResponse.IndexResponseBuilder indexResponseBuilder = new IndexResponse.IndexResponseBuilder(); + final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder(); builder = indexResponseBuilder; itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder); } else if (opType == OpType.UPDATE) { - final UpdateResponse.UpdateResponseBuilder updateResponseBuilder = new UpdateResponse.UpdateResponseBuilder(); + final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder(); builder = updateResponseBuilder; itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder); } else if (opType == OpType.DELETE) { - final DeleteResponse.DeleteResponseBuilder deleteResponseBuilder = new DeleteResponse.DeleteResponseBuilder(); + final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder(); builder = deleteResponseBuilder; itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder); } else { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index cbfc8431628..fdb2ef3aba2 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -289,15 +289,6 @@ public class BulkProcessor implements Closeable { executeIfNeeded(); } - /** - * Adds the data 
from the bytes to be processed by the bulk processor - * @deprecated use {@link #add(BytesReference, String, String, XContentType)} instead to avoid content type auto-detection - */ - @Deprecated - public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception { - return add(data, defaultIndex, defaultType, null, null); - } - /** * Adds the data from the bytes to be processed by the bulk processor */ @@ -306,19 +297,6 @@ public class BulkProcessor implements Closeable { return add(data, defaultIndex, defaultType, null, null, xContentType); } - /** - * Adds the data from the bytes to be processed by the bulk processor - * @deprecated use {@link #add(BytesReference, String, String, String, Object, XContentType)} instead to avoid content type - * auto-detection - */ - @Deprecated - public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultPipeline, @Nullable Object payload) throws Exception { - bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true); - executeIfNeeded(); - return this; - } - /** * Adds the data from the bytes to be processed by the bulk processor */ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 30d2f4d1fc8..b60728b9d45 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -243,15 +243,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques return sizeInBytes; } - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(byte[], int, int, XContentType)} - */ - @Deprecated - public BulkRequest add(byte[] data, int from, int length) throws IOException { - return add(data, from, length, null, null); - } - /** * Adds a framed data in binary format */ @@ -259,15 +250,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques return add(data, from, length, null, null, xContentType); } - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(byte[], int, int, String, String, XContentType)} - */ - @Deprecated - public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { - return add(new BytesArray(data, from, length), defaultIndex, defaultType); - } - /** * Adds a framed data in binary format */ @@ -276,16 +258,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType); } - /** - * Adds a framed data in binary format - * - * @deprecated use {@link #add(BytesReference, String, String, XContentType)} - */ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, null, true); - } - /** * Adds a framed data in binary format */ @@ -294,16 +266,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques return add(data, defaultIndex, defaultType, null, null, null, null, null, true, xContentType); } - /** - * Adds a framed data in binary format - * - * @deprecated use {@link #add(BytesReference, String, String, boolean, XContentType)} - 
*/ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex); - } - /** * Adds a framed data in binary format */ @@ -312,13 +274,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex, xContentType); } - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws IOException { - XContentType xContentType = XContentFactory.xContentType(data); - return add(data, defaultIndex, defaultType, defaultRouting, defaultFields, defaultFetchSourceContext, defaultPipeline, payload, - allowExplicitIndex, xContentType); - } - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException { @@ -432,7 +387,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques } line++; - // order is important, we set parent after routing, so routing will be set to parent if not set explicitly // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks // of index request. 
if ("index".equals(action)) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 8f634fa28a4..7d2bca54d15 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -96,16 +96,6 @@ public class BulkRequestBuilder extends ActionRequestBuilder { +public class BulkResponse extends ActionResponse implements Iterable, StatusToXContentObject { + + private static final String ITEMS = "items"; + private static final String ERRORS = "errors"; + private static final String TOOK = "took"; + private static final String INGEST_TOOK = "ingest_took"; public static final long NO_INGEST_TOOK = -1L; @@ -141,4 +156,61 @@ public class BulkResponse extends ActionResponse implements Iterable items = new ArrayList<>(); + + String currentFieldName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (TOOK.equals(currentFieldName)) { + took = parser.longValue(); + } else if (INGEST_TOOK.equals(currentFieldName)) { + ingestTook = parser.longValue(); + } else if (ERRORS.equals(currentFieldName) == false) { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (ITEMS.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + items.add(BulkItemResponse.fromXContent(parser, items.size())); + } + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + } + return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook); + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index c270c51ea38..8e2dde7db63 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -36,7 +36,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest { public BulkShardRequest() { } - BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { + public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { super(shardId); this.items = items; setRefreshPolicy(refreshPolicy); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index b51ce624800..aa368c13fb8 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -36,7 +36,8 @@ public class BulkShardResponse extends ReplicationResponse implements WriteRespo BulkShardResponse() { } - BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) { + // NOTE: public for testing only + public BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) { this.shardId = shardId; this.responses = responses; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java 
b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index fc580dd3880..efc01ab45f8 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -104,14 +104,10 @@ public class TransportShardBulkAction extends TransportWriteAction shardOperationOnPrimary( BulkShardRequest request, IndexShard primary) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); - - long[] preVersions = new long[request.items().length]; - VersionType[] preVersionTypes = new VersionType[request.items().length]; Translog.Location location = null; for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { - location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex); + location = executeBulkItemRequest(metaData, primary, request, location, requestIndex); } - BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); for (int i = 0; i < items.length; i++) { @@ -124,110 +120,73 @@ public class TransportShardBulkAction extends TransportWriteAction) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); - } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { - replicaRequest.setIgnoreOnReplica(); - replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); - } - } - assert replicaRequest.getPrimaryResponse() != null; - assert preVersionTypes[requestIndex] != null; - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - DocWriteRequest docWriteRequest = request.items()[j].request(); - docWriteRequest.version(preVersions[j]); - docWriteRequest.versionType(preVersionTypes[j]); - } - } - throw e; + final DocWriteRequest.OpType opType = itemRequest.opType(); + final Engine.Result operationResult; + final DocWriteResponse response; + final BulkItemRequest replicaRequest; + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + final IndexRequest indexRequest = (IndexRequest) itemRequest; + Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + response = indexResult.hasFailure() ? 
null : + new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(), + indexResult.getVersion(), indexResult.isCreated()); + operationResult = indexResult; + replicaRequest = request.items()[requestIndex]; + break; + case UPDATE: + UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest), + primary, metaData, request, requestIndex); + operationResult = updateResultHolder.operationResult; + response = updateResultHolder.response; + replicaRequest = updateResultHolder.replicaRequest; + break; + case DELETE: + final DeleteRequest deleteRequest = (DeleteRequest) itemRequest; + Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + response = deleteResult.hasFailure() ? null : + new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(), + deleteResult.getVersion(), deleteResult.isFound()); + operationResult = deleteResult; + replicaRequest = request.items()[requestIndex]; + break; + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); } + + // update the bulk item request because update request execution can mutate the bulk item request + request.items()[requestIndex] = replicaRequest; + if (operationResult == null) { // in case of noop update operation + assert response.getResult() == DocWriteResponse.Result.NOOP + : "only noop update can have null operation"; + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); + } else if (operationResult.hasFailure() == false) { + location = locationToSync(location, operationResult.getTranslogLocation()); + BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); + replicaRequest.setPrimaryResponse(primaryResponse); + // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. + primaryResponse.getResponse().setShardInfo(new ShardInfo()); + } else { + DocWriteRequest docWriteRequest = replicaRequest.request(); + Exception failure = operationResult.getFailure(); + if (isConflictException(failure)) { + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } else { + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); + } + } + assert replicaRequest.getPrimaryResponse() != null; return location; } @@ -266,7 +225,7 @@ public class TransportShardBulkAction extends TransportWriteAction implement /** * Constructs a new index request against the specific index and type. The - * {@link #source(byte[])} must be set. + * {@link #source(byte[], XContentType)} must be set. 
*/ public IndexRequest(String index, String type) { this.index = index; @@ -316,16 +316,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } - /** - * Sets the document source to index. - * - * @deprecated use {@link #source(String, XContentType)} - */ - @Deprecated - public IndexRequest source(String source) { - return source(new BytesArray(source), XContentFactory.xContentType(source)); - } - /** * Sets the document source to index. * @@ -383,16 +373,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } - /** - * Sets the document to index in bytes form. - * @deprecated use {@link #source(BytesReference, XContentType)} - */ - @Deprecated - public IndexRequest source(BytesReference source) { - return source(source, XContentFactory.xContentType(source)); - - } - /** * Sets the document to index in bytes form. */ @@ -402,15 +382,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement return this; } - /** - * Sets the document to index in bytes form. - * @deprecated use {@link #source(byte[], XContentType)} - */ - @Deprecated - public IndexRequest source(byte[] source) { - return source(source, 0, source.length); - } - /** * Sets the document to index in bytes form. */ @@ -418,20 +389,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement return source(source, 0, source.length, xContentType); } - /** - * Sets the document to index in bytes form (assumed to be safe to be used from different - * threads). - * - * @param source The source to index - * @param offset The offset in the byte array - * @param length The length of the data - * @deprecated use {@link #source(byte[], int, int, XContentType)} - */ - @Deprecated - public IndexRequest source(byte[] source, int offset, int length) { - return source(new BytesArray(source, offset, length), XContentFactory.xContentType(source)); - } - /** * Sets the document to index in bytes form (assumed to be safe to be used from different * threads). diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 7af43ec35ec..88b094a33f5 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -80,16 +80,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder - * Note, its preferable to either set it using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)} - * or using the {@link #setSource(byte[], XContentType)}. - * @deprecated use {@link #setSource(String, XContentType)} - */ - @Deprecated - public IndexRequestBuilder setSource(String source) { - request.source(source); - return this; - } - /** * Sets the document source to index. *
<p>
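// Document sources follow the same rule: IndexRequest#source now requires an
// explicit XContentType for raw strings and bytes, while an XContentBuilder
// carries its own type (compare the CreateIndexRequest change above, where
// source(XContentBuilder) forwards source.bytes() with source.contentType()).
// A short sketch under those assumptions:
IndexRequest request = new IndexRequest("index", "type", "1");

// Raw JSON must name its content type:
request.source("{\"field\":\"value\"}", XContentType.JSON);

// An XContentBuilder already knows its content type, so none is passed:
XContentBuilder builder = XContentFactory.jsonBuilder()
        .startObject().field("field", "value").endObject();
request.source(builder);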
@@ -150,16 +127,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder exten **/ private final Function nodeIdToConnection; private final SearchTask task; - private final AtomicArray results; + private final SearchPhaseResults results; private final long clusterStateVersion; private final Map aliasFilter; private final Map concreteIndexBoosts; @@ -76,7 +75,7 @@ abstract class AbstractSearchAsyncAction exten Map aliasFilter, Map concreteIndexBoosts, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, - long clusterStateVersion, SearchTask task) { + long clusterStateVersion, SearchTask task, SearchPhaseResults resultConsumer) { super(name, request, shardsIts, logger); this.startTime = startTime; this.logger = logger; @@ -87,9 +86,9 @@ abstract class AbstractSearchAsyncAction exten this.listener = listener; this.nodeIdToConnection = nodeIdToConnection; this.clusterStateVersion = clusterStateVersion; - results = new AtomicArray<>(shardsIts.size()); this.concreteIndexBoosts = concreteIndexBoosts; this.aliasFilter = aliasFilter; + this.results = resultConsumer; } /** @@ -105,7 +104,7 @@ abstract class AbstractSearchAsyncAction exten * This is the main entry point for a search. This method starts the search execution of the initial phase. */ public final void start() { - if (results.length() == 0) { + if (getNumShards() == 0) { //no search shards to search on, bail with empty response //(it happens with search across _all with no indices around and consistent with broadcast operations) listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(), @@ -130,8 +129,8 @@ abstract class AbstractSearchAsyncAction exten onPhaseFailure(currentPhase, "all shards failed", null); } else { if (logger.isTraceEnabled()) { - final String resultsFrom = results.asList().stream() - .map(r -> r.value.shardTarget().toString()).collect(Collectors.joining(",")); + final String resultsFrom = results.getSuccessfulResults() + .map(r -> r.shardTarget().toString()).collect(Collectors.joining(",")); logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})", currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion); } @@ -178,7 +177,7 @@ abstract class AbstractSearchAsyncAction exten synchronized (shardFailuresMutex) { shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it? 
if (shardFailures == null) { // still null so we are the first and create a new instance - shardFailures = new AtomicArray<>(results.length()); + shardFailures = new AtomicArray<>(getNumShards()); this.shardFailures.set(shardFailures); } } @@ -194,7 +193,7 @@ abstract class AbstractSearchAsyncAction exten } } - if (results.get(shardIndex) != null) { + if (results.hasResult(shardIndex)) { assert failure == null : "shard failed before but shouldn't: " + failure; successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter } @@ -207,22 +206,22 @@ abstract class AbstractSearchAsyncAction exten * @param exception the exception explaining or causing the phase failure */ private void raisePhaseFailure(SearchPhaseExecutionException exception) { - for (AtomicArray.Entry entry : results.asList()) { + results.getSuccessfulResults().forEach((entry) -> { try { - Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId()); - sendReleaseSearchContext(entry.value.id(), connection); + Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId()); + sendReleaseSearchContext(entry.id(), connection); } catch (Exception inner) { inner.addSuppressed(exception); logger.trace("failed to release context", inner); } - } + }); listener.onFailure(exception); } @Override public final void onShardSuccess(int shardIndex, Result result) { successfulOps.incrementAndGet(); - results.set(shardIndex, result); + results.consumeResult(shardIndex, result); if (logger.isTraceEnabled()) { logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null); } @@ -242,7 +241,7 @@ abstract class AbstractSearchAsyncAction exten @Override public final int getNumShards() { - return results.length(); + return results.getNumShards(); } @Override @@ -262,7 +261,7 @@ abstract class AbstractSearchAsyncAction exten @Override public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - return new SearchResponse(internalSearchResponse, scrollId, results.length(), successfulOps.get(), + return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), buildTookInMillis(), buildShardFailures()); } @@ -310,6 +309,5 @@ abstract class AbstractSearchAsyncAction exten * executed shard request * @param context the search context for the next phase */ - protected abstract SearchPhase getNextPhase(AtomicArray results, SearchPhaseContext context); - + protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); } diff --git a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java index be0ee2c161e..65f2d2d280b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -30,17 +29,13 @@ import org.elasticsearch.search.SearchShardTarget; * where the given index is used to set the result on the array. 
*/ final class CountedCollector { - private final AtomicArray resultArray; + private final ResultConsumer resultConsumer; private final CountDown counter; private final Runnable onFinish; private final SearchPhaseContext context; - CountedCollector(AtomicArray resultArray, int expectedOps, Runnable onFinish, SearchPhaseContext context) { - if (expectedOps > resultArray.length()) { - throw new IllegalStateException("unexpected number of operations. got: " + expectedOps + " but array size is: " - + resultArray.length()); - } - this.resultArray = resultArray; + CountedCollector(ResultConsumer resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { + this.resultConsumer = resultConsumer; this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -63,7 +58,7 @@ final class CountedCollector { void onResult(int index, R result, SearchShardTarget target) { try { result.shardTarget(target); - resultArray.set(index, result); + resultConsumer.consume(index, result); } finally { countDown(); } @@ -80,4 +75,12 @@ final class CountedCollector { countDown(); } } + + /** + * A functional interface to plug in shard result consumers to this collector + */ + @FunctionalInterface + public interface ResultConsumer { + void consume(int shardIndex, R result); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 5447b9eee8f..0ac3c69b8eb 100644 --- a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -40,18 +40,19 @@ import java.util.function.Function; * @see CountedCollector#onFailure(int, SearchShardTarget, Exception) */ final class DfsQueryPhase extends SearchPhase { - private final AtomicArray queryResult; + private final InitialSearchPhase.SearchPhaseResults queryResult; private final SearchPhaseController searchPhaseController; private final AtomicArray dfsSearchResults; - private final Function, SearchPhase> nextPhaseFactory; + private final Function, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final SearchTransportService searchTransportService; DfsQueryPhase(AtomicArray dfsSearchResults, SearchPhaseController searchPhaseController, - Function, SearchPhase> nextPhaseFactory, SearchPhaseContext context) { + Function, SearchPhase> nextPhaseFactory, + SearchPhaseContext context) { super("dfs_query"); - this.queryResult = new AtomicArray<>(dfsSearchResults.length()); + this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards()); this.searchPhaseController = searchPhaseController; this.dfsSearchResults = dfsSearchResults; this.nextPhaseFactory = nextPhaseFactory; @@ -64,7 +65,8 @@ final class DfsQueryPhase extends SearchPhase { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults); - final CountedCollector counter = new CountedCollector<>(queryResult, dfsSearchResults.asList().size(), + final CountedCollector counter = new CountedCollector<>(queryResult::consumeResult, + dfsSearchResults.asList().size(), () -> { context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)); }, context); diff --git a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java 
b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 1215e97ae3a..20d91770675 100644 --- a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -49,29 +49,31 @@ final class FetchSearchPhase extends SearchPhase { private final Function nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; + private final InitialSearchPhase.SearchPhaseResults resultConsumer; - FetchSearchPhase(AtomicArray queryResults, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context) { - this(queryResults, searchPhaseController, context, + this(resultConsumer, searchPhaseController, context, (response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits (finalResponse) -> sendResponsePhase(finalResponse, context))); } - FetchSearchPhase(AtomicArray queryResults, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context, Function nextPhaseFactory) { super("fetch"); - if (context.getNumShards() != queryResults.length()) { + if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException("number of shards must match the length of the query results but doesn't:" - + context.getNumShards() + "!=" + queryResults.length()); + + context.getNumShards() + "!=" + resultConsumer.getNumShards()); } - this.fetchResults = new AtomicArray<>(queryResults.length()); + this.fetchResults = new AtomicArray<>(resultConsumer.getNumShards()); this.searchPhaseController = searchPhaseController; - this.queryResults = queryResults; + this.queryResults = resultConsumer.results; this.nextPhaseFactory = nextPhaseFactory; this.context = context; this.logger = context.getLogger(); + this.resultConsumer = resultConsumer; } @@ -99,7 +101,7 @@ final class FetchSearchPhase extends SearchPhase { ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults); String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null; List> queryResultsAsList = queryResults.asList(); - final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResultsAsList); + final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); final boolean queryAndFetchOptimization = queryResults.length() == 1; final Runnable finishPhase = () -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ? @@ -119,7 +121,7 @@ final class FetchSearchPhase extends SearchPhase { final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ? 
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards) : null; - final CountedCollector counter = new CountedCollector<>(fetchResults, + final CountedCollector counter = new CountedCollector<>(fetchResults::set, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not finishPhase, context); for (int i = 0; i < docIdsToLoad.length; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index dac215801fc..f21e9d228d6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -28,12 +28,14 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.transport.ConnectTransportException; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; /** * This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator} @@ -213,4 +215,53 @@ abstract class InitialSearchPhase extends * @param listener the listener to notify on response */ protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener); + + /** + * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing + */ + static class SearchPhaseResults { + final AtomicArray results; + + SearchPhaseResults(int size) { + results = new AtomicArray<>(size); + } + + /** + * Returns the number of expected results this class should collect + */ + final int getNumShards() { + return results.length(); + } + + /** + * A stream of all non-null (successful) shard results + */ + final Stream getSuccessfulResults() { + return results.asList().stream().map(e -> e.value); + } + + /** + * Consumes a single shard result + * @param shardIndex the shard's index, a 0-based id used to establish a 1:1 mapping to the searched shards + * @param result the shard's result + */ + void consumeResult(int shardIndex, Result result) { + assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set"; + results.set(shardIndex, result); + } + + /** + * Returns true iff a result is present for the given shard ID.
+ */ + final boolean hasResult(int shardIndex) { + return results.get(shardIndex) != null; + } + + /** + * Reduces the collected results + */ + SearchPhaseController.ReducedQueryPhase reduce() { + throw new UnsupportedOperationException("reduce is not supported"); + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 2cf0c317d00..d846c42dbea 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.transport.Transport; @@ -43,7 +42,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, - request, listener, shardsIts, startTime, clusterStateVersion, task); + request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size())); this.searchPhaseController = searchPhaseController; } @@ -54,8 +53,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction } @Override - protected SearchPhase getNextPhase(AtomicArray results, SearchPhaseContext context) { - return new DfsQueryPhase(results, searchPhaseController, + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new DfsQueryPhase(results.results, searchPhaseController, (queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index 6786e60fd61..1a21eb3cc34 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -114,4 +114,5 @@ interface SearchPhaseContext extends ActionListener, Executor { * a response is returned to the user indicating that all shards have failed. 
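Stepping back to the SearchPhaseResults base class added in the previous hunk: it is deliberately small, offering index-addressed storage, a stream of successful results, and an overridable reduce() hook. A self-contained stand-in showing the same contract (MiniResults is invented for illustration):

import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Stream;

final class MiniResults<T> {
    private final T[] results;

    @SuppressWarnings("unchecked")
    MiniResults(int numShards) {
        results = (T[]) new Object[numShards]; // one slot per searched shard
    }

    void consumeResult(int shardIndex, T result) {
        assert results[shardIndex] == null : "shard " + shardIndex + " already set";
        results[shardIndex] = result;
    }

    boolean hasResult(int shardIndex) {
        return results[shardIndex] != null;
    }

    Stream<T> getSuccessfulResults() {
        return Arrays.stream(results).filter(Objects::nonNull); // failed shards stay null
    }
}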
*/ void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase); + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5193fe72784..52fbf952fe4 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -70,14 +71,6 @@ import java.util.stream.StreamSupport; public class SearchPhaseController extends AbstractComponent { - private static final Comparator> QUERY_RESULT_ORDERING = (o1, o2) -> { - int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex()); - if (i == 0) { - i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id(); - } - return i; - }; - private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; private final BigArrays bigArrays; @@ -149,6 +142,9 @@ public class SearchPhaseController extends AbstractComponent { * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. + * + * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate + * the result. In order to obtain stable results the shard index (index of the result in the result array) must be the same. + * * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result. * Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase. * @param resultsArr Shard result holder @@ -159,26 +155,31 @@ public class SearchPhaseController extends AbstractComponent { return EMPTY_DOCS; } + final QuerySearchResult result; boolean canOptimize = false; - QuerySearchResult result = null; int shardIndex = -1; if (results.size() == 1) { canOptimize = true; result = results.get(0).value.queryResult(); shardIndex = results.get(0).index; } else { + boolean hasResult = false; + QuerySearchResult resultToOptimize = null; // let's see if we only got hits from a single shard, if so, we can optimize... for (AtomicArray.Entry entry : results) { if (entry.value.queryResult().hasHits()) { - if (result != null) { // we already have one, can't really optimize + if (hasResult) { // we already have one, can't really optimize canOptimize = false; break; } canOptimize = true; - result = entry.value.queryResult(); + hasResult = true; + resultToOptimize = entry.value.queryResult(); shardIndex = entry.index; } } + result = canOptimize ?
resultToOptimize : results.get(0).value.queryResult(); + assert result != null; } if (canOptimize) { int offset = result.from(); @@ -224,74 +225,62 @@ public class SearchPhaseController extends AbstractComponent { return docs; } - @SuppressWarnings("unchecked") - AtomicArray.Entry[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]); - Arrays.sort(sortedResults, QUERY_RESULT_ORDERING); - QuerySearchResultProvider firstResult = sortedResults[0].value; - - int topN = firstResult.queryResult().size(); - int from = firstResult.queryResult().from(); - if (ignoreFrom) { - from = 0; - } + final int topN = result.queryResult().size(); + final int from = ignoreFrom ? 0 : result.queryResult().from(); final TopDocs mergedTopDocs; - int numShards = resultsArr.length(); - if (firstResult.queryResult().topDocs() instanceof CollapseTopFieldDocs) { - CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) firstResult.queryResult().topDocs(); + final int numShards = resultsArr.length(); + if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) { + CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs(); final Sort sort = new Sort(firstTopDocs.fields); final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (results.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], + sort.getSort(), new Object[0], Float.NaN); + Arrays.fill(shardTopDocs, empty); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], - sort.getSort(), new Object[0], Float.NaN); - } - } mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs); - } else if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) { - TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs(); + } else if (result.queryResult().topDocs() instanceof TopFieldDocs) { + TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs(); final Sort sort = new Sort(firstTopDocs.fields); final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (results.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN); + Arrays.fill(shardTopDocs, empty); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN); - } - } mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs); } else { final TopDocs[] shardTopDocs = new
TopDocs[resultsArr.length()]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (results.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = Lucene.EMPTY_TOP_DOCS; - } - } mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs); } ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs; final Map>> groupedCompletionSuggestions = new HashMap<>(); // group suggestions and assign shard index - for (AtomicArray.Entry sortedResult : sortedResults) { + for (AtomicArray.Entry sortedResult : results) { Suggest shardSuggest = sortedResult.value.queryResult().suggest(); if (shardSuggest != null) { for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) { @@ -461,23 +450,54 @@ public class SearchPhaseController extends AbstractComponent { /** * Reduces the given query results and consumes all aggregations and profile results. + * @param queryResults a list of non-null query shard results + */ + public final ReducedQueryPhase reducedQueryPhase(List> queryResults) { + return reducedQueryPhase(queryResults, null, 0); + } + + /** + * Reduces the given query results and consumes all aggregations and profile results. + * @param queryResults a list of non-null query shard results + * @param bufferdAggs a list of pre-collected / buffered aggregations. If this list is non-null, all aggregations have been consumed + * from all non-null query results. + * @param numReducePhases the number of non-final reduce phases applied to the query results. * @see QuerySearchResult#consumeAggs() * @see QuerySearchResult#consumeProfileResult() */ - public final ReducedQueryPhase reducedQueryPhase(List> queryResults) { + private ReducedQueryPhase reducedQueryPhase(List> queryResults, + List bufferdAggs, int numReducePhases) { + assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; + numReducePhases++; // increment for this phase long totalHits = 0; long fetchHits = 0; float maxScore = Float.NEGATIVE_INFINITY; boolean timedOut = false; Boolean terminatedEarly = null; - if (queryResults.isEmpty()) { - return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null); + if (queryResults.isEmpty()) { // early terminate, we have nothing to reduce + return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null, + numReducePhases); } - QuerySearchResult firstResult = queryResults.get(0).value.queryResult(); + final QuerySearchResult firstResult = queryResults.get(0).value.queryResult(); final boolean hasSuggest = firstResult.suggest() != null; - final boolean hasAggs = firstResult.hasAggs(); final boolean hasProfileResults = firstResult.hasProfileResults(); - final List aggregationsList = hasAggs ?
new ArrayList<>(queryResults.size()) : Collections.emptyList(); + final boolean consumeAggs; + final List aggregationsList; + if (bufferdAggs != null) { + consumeAggs = false; + // we already have results from intermediate reduces and just need to perform the final reduce + assert firstResult.hasAggs() : "firstResult has no aggs but we got non null buffered aggs?"; + aggregationsList = bufferdAggs; + } else if (firstResult.hasAggs()) { + // the number of shards was less than the buffer size so we reduce agg results directly + aggregationsList = new ArrayList<>(queryResults.size()); + consumeAggs = true; + } else { + // no aggregations + aggregationsList = Collections.emptyList(); + consumeAggs = false; + } + // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileResults = hasProfileResults ? new HashMap<>(queryResults.size()) @@ -506,7 +526,7 @@ public class SearchPhaseController extends AbstractComponent { suggestionList.add(suggestion); } } - if (hasAggs) { + if (consumeAggs) { aggregationsList.add((InternalAggregations) result.consumeAggs()); } if (hasProfileResults) { @@ -515,16 +535,27 @@ public class SearchPhaseController extends AbstractComponent { } } final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); + ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true); final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList, - firstResult.pipelineAggregators()); + firstResult.pipelineAggregators(), reduceContext); final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults); return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, firstResult, suggest, aggregations, - shardResults); + shardResults, numReducePhases); + } + + + /** + * Performs an intermediate reduce phase on the aggregations. In contrast to the final reduce, this phase never prunes information + * that is relevant for the final reduce step. For the final reduce see {@link #reduceAggs(List, List, ReduceContext)} + */ + private InternalAggregations reduceAggsIncrementally(List aggregationsList) { + ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false); + return aggregationsList.isEmpty() ?
null : reduceAggs(aggregationsList, + null, reduceContext); } private InternalAggregations reduceAggs(List aggregationsList, - List pipelineAggregators) { - ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService); + List pipelineAggregators, ReduceContext reduceContext) { InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); if (pipelineAggregators != null) { List newAggs = StreamSupport.stream(aggregations.spliterator(), false) @@ -558,10 +589,15 @@ public class SearchPhaseController extends AbstractComponent { final InternalAggregations aggregations; // the reduced profile results final SearchProfileShardResults shardResults; + // the number of reduce phases + final int numReducePhases; ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly, QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations, - SearchProfileShardResults shardResults) { + SearchProfileShardResults shardResults, int numReducePhases) { + if (numReducePhases <= 0) { + throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); + } this.totalHits = totalHits; this.fetchHits = fetchHits; if (Float.isInfinite(maxScore)) { @@ -575,6 +611,7 @@ public class SearchPhaseController extends AbstractComponent { this.suggest = suggest; this.aggregations = aggregations; this.shardResults = shardResults; + this.numReducePhases = numReducePhases; } /** @@ -582,7 +619,7 @@ * @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray) */ public InternalSearchResponse buildResponse(SearchHits hits) { - return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly); + return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases); } /** @@ -593,4 +630,95 @@ } } + /** + * A {@link org.elasticsearch.action.search.InitialSearchPhase.SearchPhaseResults} implementation + * that incrementally reduces aggregation results as shard results are consumed. + * This implementation can be configured to batch up a certain number of results and only reduce them + * iff the buffer is exhausted. + */ + static final class QueryPhaseResultConsumer + extends InitialSearchPhase.SearchPhaseResults { + private final InternalAggregations[] buffer; + private int index; + private final SearchPhaseController controller; + private int numReducePhases = 0; + + /** + * Creates a new {@link QueryPhaseResultConsumer} + * @param controller a controller instance to reduce the query response objects + * @param expectedResultSize the expected number of query results. Corresponds to the number of shards queried + * @param bufferSize the size of the reduce buffer. If the buffer size is smaller than the number of expected results, + * the buffer is used to incrementally reduce aggregation results before all shards have responded.
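The constructor that follows allocates a fixed-size buffer, and consumeInternal folds it down to a single partially-reduced aggregation whenever it fills, so at most bufferSize aggregation trees are alive at once. The same mechanism over plain longs, with summation standing in for the aggregation reduce (everything here is invented for illustration):

import java.util.Arrays;

final class BufferedSummer {
    private final long[] buffer;
    private int index;
    private int numReducePhases;

    BufferedSummer(int bufferSize) {
        this.buffer = new long[bufferSize];
    }

    synchronized void consume(long value) {
        if (index == buffer.length) {                   // buffer exhausted: intermediate reduce
            long partial = Arrays.stream(buffer).sum();
            Arrays.fill(buffer, 0);
            numReducePhases++;
            buffer[0] = partial;                        // partial result re-enters at slot 0
            index = 1;
        }
        buffer[index++] = value;
    }

    synchronized long reduce() {                        // final reduce over whatever remains
        return Arrays.stream(buffer, 0, index).sum();
    }
}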
+ */ + private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize) { + super(expectedResultSize); + if (expectedResultSize != 1 && bufferSize < 2) { + throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result"); + } + if (expectedResultSize <= bufferSize) { + throw new IllegalArgumentException("buffer size must be less than the expected result size"); + } + this.controller = controller; + // no need to buffer anything if we have fewer expected results; in that case we don't consume any results ahead of time. + this.buffer = new InternalAggregations[bufferSize]; + } + + @Override + public void consumeResult(int shardIndex, QuerySearchResultProvider result) { + super.consumeResult(shardIndex, result); + QuerySearchResult queryResult = result.queryResult(); + assert queryResult.hasAggs() : "this collector should only be used if aggs are requested"; + consumeInternal(queryResult); + } + + private synchronized void consumeInternal(QuerySearchResult querySearchResult) { + InternalAggregations aggregations = (InternalAggregations) querySearchResult.consumeAggs(); + if (index == buffer.length) { + InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(buffer)); + Arrays.fill(buffer, null); + numReducePhases++; + buffer[0] = reducedAggs; + index = 1; + } + final int i = index++; + buffer[i] = aggregations; + } + + private synchronized List getRemaining() { + return Arrays.asList(buffer).subList(0, index); + } + + @Override + public ReducedQueryPhase reduce() { + return controller.reducedQueryPhase(results.asList(), getRemaining(), numReducePhases); + } + + /** + * Returns the number of buffered results + */ + int getNumBuffered() { + return index; + } + + int getNumReducePhases() { return numReducePhases; } + } + + /** + * Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
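The factory whose javadoc appears above only opts into the buffering consumer when both conditions hold: the request actually has aggregations, and there are more shards than the configured batch size; otherwise buffering cannot pay off. A condensed sketch of that decision (the method and return values are illustrative, not the PR's code):

final class ConsumerChoice {
    static String choose(boolean hasAggs, int batchedReduceSize, int numShards) {
        if (hasAggs && batchedReduceSize < numShards) {
            return "QueryPhaseResultConsumer"; // incremental, memory-bounded agg reduction
        }
        return "SearchPhaseResults";           // plain collection, single final reduce
    }
}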
+ */ + InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(SearchRequest request, int numShards) { + SearchSourceBuilder source = request.source(); + if (source != null && source.aggregations() != null) { + if (request.getBatchedReduceSize() < numShards) { + // only use this if there are aggs and if there are more shards than we should reduce at once + return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize()); + } + } + return new InitialSearchPhase.SearchPhaseResults(numShards) { + @Override + public ReducedQueryPhase reduce() { + return reducedQueryPhase(results.asList()); + } + }; + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 8d4edfeb79f..210a9aefda7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.transport.Transport; @@ -44,17 +43,19 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction results, SearchPhaseContext context) { + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new FetchSearchPhase(results, searchPhaseController, context); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9c69f1a763f..9e35cca05b9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -70,6 +70,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private Scroll scroll; + private int batchedReduceSize = 512; + private String[] types = Strings.EMPTY_ARRAY; public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed(); @@ -274,6 +276,25 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest return this.requestCache; } + /** + * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection + * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + */ + public void setBatchedReduceSize(int batchedReduceSize) { + if (batchedReduceSize <= 1) { + throw new IllegalArgumentException("batchedReduceSize must be >= 2"); + } + this.batchedReduceSize = batchedReduceSize; + } + + /** + * Returns the number of shard results that should be reduced at once on the coordinating node. This value should be used as a + * protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. 
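With this request option the coordinating node holds at most min(batchedReduceSize, numShards) shard-level aggregation trees instead of one per shard. A hypothetical client-side usage sketch; the index pattern and the value 64 are invented:

import org.elasticsearch.action.search.SearchRequest;

public class BatchedReduceExample {
    public static void main(String[] args) {
        SearchRequest request = new SearchRequest("logs-*");
        request.setBatchedReduceSize(64); // reduce at most 64 shard agg trees at a time
        assert request.getBatchedReduceSize() == 64;
        // request.setBatchedReduceSize(1) would throw IllegalArgumentException (must be >= 2)
    }
}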
+ */ + public int getBatchedReduceSize() { + return batchedReduceSize; + } + /** * @return true if the request only has suggest */ @@ -320,6 +341,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest types = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); + batchedReduceSize = in.readVInt(); } @Override @@ -337,6 +359,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest out.writeStringArray(types); indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); + out.writeVInt(batchedReduceSize); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 865cf01430f..ffe2c1b20c5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -523,4 +523,13 @@ public class SearchRequestBuilder extends ActionRequestBuilder> extends ReplicationRequest implements WriteRequest { private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; - private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - /** * Constructor for deserialization. */ @@ -66,32 +62,11 @@ public abstract class ReplicatedWriteRequest public void readFrom(StreamInput in) throws IOException { super.readFrom(in); refreshPolicy = RefreshPolicy.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - seqNo = in.readZLong(); - } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); refreshPolicy.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - out.writeZLong(seqNo); - } - } - - /** - * Returns the sequence number for this operation. The sequence number is assigned while the operation - * is performed on the primary shard. - */ - public long getSeqNo() { - return seqNo; - } - - /** sets the sequence number for this operation. 
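The readFrom/writeTo bodies removed in the ReplicatedWriteRequest hunk above illustrate the wire-compatibility idiom used throughout: gate new fields on the stream version so mixed-version clusters can still communicate. A generic, self-contained sketch of the idiom; the version constant and field are invented, not the removed seqNo plumbing itself:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class VersionedPayload {
    static final int V_NEW = 6_000_000;  // invented version id
    long seqNo = -2;                     // sentinel for "unassigned"

    void writeTo(DataOutput out, int remoteVersion) throws IOException {
        if (remoteVersion >= V_NEW) {
            out.writeLong(seqNo);        // only newer nodes expect the field
        }
    }

    void readFrom(DataInput in, int remoteVersion) throws IOException {
        seqNo = remoteVersion >= V_NEW ? in.readLong() : -2; // default when absent
    }
}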
should only be called on the primary shard */ - public void setSeqNo(long seqNo) { - this.seqNo = seqNo; } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index 97a696a961f..4b1873e8d06 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -38,7 +38,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; @@ -74,7 +73,6 @@ public class ReplicationResponse extends ActionResponse { public static class ShardInfo implements Streamable, ToXContentObject { - private static final String _SHARDS = "_shards"; private static final String TOTAL = "total"; private static final String SUCCESSFUL = "successful"; private static final String FAILED = "failed"; @@ -134,25 +132,6 @@ public class ReplicationResponse extends ActionResponse { return status; } - @Override - public boolean equals(Object that) { - if (this == that) { - return true; - } - if (that == null || getClass() != that.getClass()) { - return false; - } - ShardInfo other = (ShardInfo) that; - return Objects.equals(total, other.total) && - Objects.equals(successful, other.successful) && - Arrays.equals(failures, other.failures); - } - - @Override - public int hashCode() { - return Objects.hash(total, successful, failures); - } - @Override public void readFrom(StreamInput in) throws IOException { total = in.readVInt(); @@ -327,27 +306,6 @@ public class ReplicationResponse extends ActionResponse { return primary; } - @Override - public boolean equals(Object that) { - if (this == that) { - return true; - } - if (that == null || getClass() != that.getClass()) { - return false; - } - Failure failure = (Failure) that; - return Objects.equals(primary, failure.primary) && - Objects.equals(shardId, failure.shardId) && - Objects.equals(nodeId, failure.nodeId) && - Objects.equals(cause, failure.cause) && - Objects.equals(status, failure.status); - } - - @Override - public int hashCode() { - return Objects.hash(shardId, nodeId, cause, status, primary); - } - @Override public void readFrom(StreamInput in) throws IOException { shardId = ShardId.readShardId(in); diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 0235dd95a4b..67d62113062 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -171,7 +171,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio final ShardId shardId = request.getShardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard indexShard = indexService.getShard(shardId.getId()); - final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::estimatedTimeInMillis); + final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis); switch (result.getResponseResult()) { case CREATED: IndexRequest 
upsertRequest = result.action(); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 930943ea0f2..2f153cdbef7 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -30,8 +30,11 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -49,7 +52,7 @@ import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; public class UpdateRequest extends InstanceShardOperationRequest - implements DocWriteRequest, WriteRequest { + implements DocWriteRequest, WriteRequest, ToXContentObject { private String type; private String id; @@ -553,16 +556,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc to use for updates when a script is not specified. - * @deprecated use {@link #doc(String, XContentType)} - */ - @Deprecated - public UpdateRequest doc(String source) { - safeDoc().source(source); - return this; - } - /** * Sets the doc to use for updates when a script is not specified. */ @@ -571,16 +564,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc to use for updates when a script is not specified. - * @deprecated use {@link #doc(byte[], XContentType)} - */ - @Deprecated - public UpdateRequest doc(byte[] source) { - safeDoc().source(source); - return this; - } - /** * Sets the doc to use for updates when a script is not specified. */ @@ -589,16 +572,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc to use for updates when a script is not specified. - * @deprecated use {@link #doc(byte[], int, int, XContentType)} - */ - @Deprecated - public UpdateRequest doc(byte[] source, int offset, int length) { - safeDoc().source(source, offset, length); - return this; - } - /** * Sets the doc to use for updates when a script is not specified. */ @@ -669,16 +642,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc source of the update request to be used when the document does not exist. - * @deprecated use {@link #upsert(String, XContentType)} - */ - @Deprecated - public UpdateRequest upsert(String source) { - safeUpsertRequest().source(source); - return this; - } - /** * Sets the doc source of the update request to be used when the document does not exist. */ @@ -687,16 +650,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc source of the update request to be used when the document does not exist. - * @deprecated use {@link #upsert(byte[], XContentType)} - */ - @Deprecated - public UpdateRequest upsert(byte[] source) { - safeUpsertRequest().source(source); - return this; - } - /** * Sets the doc source of the update request to be used when the document does not exist. */ @@ -705,16 +658,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Sets the doc source of the update request to be used when the document does not exist. - * @deprecated use {@link #upsert(byte[], int, int, XContentType)} - */ - @Deprecated - public UpdateRequest upsert(byte[] source, int offset, int length) { - safeUpsertRequest().source(source, offset, length); - return this; - } - /** * Sets the doc source of the update request to be used when the document does not exist. */ @@ -906,4 +849,42 @@ public class UpdateRequest extends InstanceShardOperationRequest out.writeBoolean(scriptedUpsert); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (docAsUpsert) { + builder.field("doc_as_upsert", docAsUpsert); + } + if (doc != null) { + XContentType xContentType = doc.getContentType(); + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, doc.source(), xContentType)) { + builder.field("doc"); + builder.copyCurrentStructure(parser); + } + } + if (script != null) { + builder.field("script", script); + } + if (upsertRequest != null) { + XContentType xContentType = upsertRequest.getContentType(); + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, upsertRequest.source(), xContentType)) { + builder.field("upsert"); + builder.copyCurrentStructure(parser); + } + } + if (scriptedUpsert) { + builder.field("scripted_upsert", scriptedUpsert); + } + if (detectNoop == false) { + builder.field("detect_noop", detectNoop); + } + if (fields != null) { + builder.array("fields", fields); + } + if (fetchSourceContext != null) { + builder.field("_source", fetchSourceContext); + } + builder.endObject(); + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 92b2ce6d7d8..5ba187013e7 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -221,16 +221,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder
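The UpdateRequest#toXContent added above emits only non-default fields (doc_as_upsert, doc, script, upsert, scripted_upsert, detect_noop, fields, _source). A hypothetical usage sketch; the index, type, id and document body are invented:

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

public class UpdateRequestToXContentExample {
    public static void main(String[] args) throws Exception {
        UpdateRequest request = new UpdateRequest("test", "type1", "1")
            .doc("{\"body\":\"value\"}", XContentType.JSON)
            .docAsUpsert(true);
        BytesReference bytes = XContentHelper.toXContent(request, XContentType.JSON, false);
        System.out.println(bytes.utf8ToString()); // {"doc_as_upsert":true,"doc":{"body":"value"}}
    }
}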
sb = new StringBuilder(); + arrayToDelimitedString(arr, delim, sb); + return sb.toString(); } - public static String arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { + public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { if (isEmpty(arr)) { - return ""; + return; } for (int i = 0; i < arr.length; i++) { if (i > 0) { @@ -771,7 +773,6 @@ public class Strings { } sb.append(arr[i]); } - return sb.toString(); } /** diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 9092e13eb1b..0e790c0dc8b 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -63,7 +63,7 @@ public class BlobPath implements Iterable { public String buildAsString() { String p = String.join(SEPARATOR, paths); - if (p.isEmpty()) { + if (p.isEmpty() || p.endsWith(SEPARATOR)) { return p; } return p + SEPARATOR; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 968ad8ac6bc..0c6cb1c5cda 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -448,12 +448,20 @@ public class XContentHelper { * {@link XContentType}. Wraps the output into a new anonymous object. */ public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean humanReadable) throws IOException { + return toXContent(toXContent, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + } + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link XContentType}. Wraps the output into a new anonymous object. 
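The Strings refactor earlier in this hunk makes the StringBuilder-accepting variants void, so it is explicit that they append into the caller's builder rather than returning a new String. A small self-contained sketch of that append-style shape (helper and sample data invented):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class DelimitedExample {
    static void appendDelimited(Iterable<?> coll, String delim, StringBuilder sb) {
        Iterator<?> it = coll.iterator();
        while (it.hasNext()) {
            sb.append(it.next());
            if (it.hasNext()) {
                sb.append(delim);
            }
        }
    }

    public static void main(String[] args) {
        List<String> parts = Arrays.asList("a", "b", "c");
        StringBuilder sb = new StringBuilder("[");
        appendDelimited(parts, ",", sb); // appends in place, no intermediate String
        sb.append(']');
        System.out.println(sb); // [a,b,c]
    }
}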
+ */ + public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException { try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { builder.humanReadable(humanReadable); if (toXContent.isFragment()) { builder.startObject(); } - toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + toXContent.toXContent(builder, params); if (toXContent.isFragment()) { builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index c65542093d3..6f56a547d3f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; public class MembershipAction extends AbstractComponent { @@ -63,8 +62,7 @@ public class MembershipAction extends AbstractComponent { private final MembershipListener listener; - public MembershipAction(Settings settings, TransportService transportService, - Supplier localNodeSupplier, MembershipListener listener) { + public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener) { super(settings); this.transportService = transportService; this.listener = listener; @@ -73,7 +71,7 @@ public class MembershipAction extends AbstractComponent { transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, - () -> new ValidateJoinRequest(localNodeSupplier), ThreadPool.Names.GENERIC, + () -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler()); transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); @@ -155,22 +153,18 @@ public class MembershipAction extends AbstractComponent { } static class ValidateJoinRequest extends TransportRequest { - private final Supplier localNode; private ClusterState state; - ValidateJoinRequest(Supplier localNode) { - this.localNode = localNode; - } + ValidateJoinRequest() {} ValidateJoinRequest(ClusterState state) { this.state = state; - this.localNode = state.nodes()::getLocalNode; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - this.state = ClusterState.readFrom(in, localNode.get()); + this.state = ClusterState.readFrom(in, null); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 94c46ed8670..be6f52fc22c 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -191,7 +191,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover new NewPendingClusterStateListener(), discoverySettings, clusterService.getClusterName()); - this.membership = new MembershipAction(settings, transportService, this::localNode, new MembershipListener()); + this.membership = new MembershipAction(settings, transportService, new MembershipListener()); 
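The XContentHelper overload completed at the top of this stretch still wraps fragment renderers in an enclosing object so the output is always a valid standalone document. A minimal stand-alone analogue of that wrap-if-fragment behavior, with the XContent machinery reduced to strings:

public class FragmentWrapExample {
    static String render(String body, boolean isFragment) {
        return isFragment ? "{" + body + "}" : body; // fragments get an enclosing object
    }

    public static void main(String[] args) {
        System.out.println(render("\"took\":3", true));    // {"took":3}
        System.out.println(render("{\"took\":3}", false)); // {"took":3}
    }
}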
this.joinThreadControl = new JoinThreadControl(); transportService.registerRequestHandler( diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index b5e254aa4c2..9bf8be2da45 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -19,6 +19,7 @@ package org.elasticsearch.http; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.PortsRange; @@ -69,7 +70,14 @@ public final class HttpTransportSettings { public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope); public static final Setting SETTING_HTTP_CONTENT_TYPE_REQUIRED = - Setting.boolSetting("http.content_type.required", false, Property.NodeScope); + new Setting<>("http.content_type.required", (s) -> Boolean.toString(true), (s) -> { + final boolean value = Booleans.parseBoolean(s); + if (value == false) { + throw new IllegalArgumentException("http.content_type.required cannot be set to false. It exists only to make a rolling" + + " upgrade easier"); + } + return true; + }, Property.NodeScope, Property.Deprecated); public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 36b8ab6574c..f6b452502a5 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -391,6 +391,14 @@ public abstract class Engine implements Closeable { this.created = created; } + /** + * used when an index operation failed before it reached the internal engine + * (e.g. while preparing the operation or updating mappings) + */ + public IndexResult(Exception failure, long version) { + this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); + } + public IndexResult(Exception failure, long version, long seqNo) { super(Operation.TYPE.INDEX, failure, version, seqNo); this.created = false; diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 4fe947660c1..60dddc4d40d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -188,7 +188,7 @@ public final class EngineConfig { /** * Returns a thread-pool mainly used to get estimated time stamps from - * {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule + * {@link org.elasticsearch.threadpool.ThreadPool#relativeTimeInMillis()} and to schedule async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool */ public ThreadPool getThreadPool() { diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index bcfee5026ce..0fa6855ce08 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++
b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -147,7 +147,7 @@ public class InternalEngine extends Engine { EngineMergeScheduler scheduler = null; boolean success = false; try { - this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); + this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); throttle = new IndexThrottle(); @@ -446,7 +446,7 @@ public class InternalEngine extends Engine { private long checkDeletedAndGCed(VersionValue versionValue) { long currentVersion; - if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) { + if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) { currentVersion = Versions.NOT_FOUND; // deleted, and GC } else { currentVersion = versionValue.version(); @@ -478,6 +478,20 @@ public class InternalEngine extends Engine { return false; } + private boolean assertVersionType(final Engine.Operation operation) { + if (operation.origin() == Operation.Origin.REPLICA || + operation.origin() == Operation.Origin.PEER_RECOVERY || + operation.origin() == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + // ensure that replica operation has expected version type for replication + // ensure that versionTypeForReplicationAndRecovery is idempotent + assert operation.versionType() == operation.versionType().versionTypeForReplicationAndRecovery() + : "unexpected version type in request from [" + operation.origin().name() + "] " + + "found [" + operation.versionType().name() + "] " + + "expected [" + operation.versionType().versionTypeForReplicationAndRecovery().name() + "]"; + } + return true; + } + private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { // legacy support @@ -499,6 +513,7 @@ public class InternalEngine extends Engine { try (ReleasableLock releasableLock = readLock.acquire()) { ensureOpen(); assert assertSequenceNumber(index.origin(), index.seqNo()); + assert assertVersionType(index); final Translog.Location location; long seqNo = index.seqNo(); try (Releasable ignored = acquireLock(index.uid()); @@ -692,6 +707,7 @@ public class InternalEngine extends Engine { public DeleteResult delete(Delete delete) throws IOException { DeleteResult result; try (ReleasableLock ignored = readLock.acquire()) { + assert assertVersionType(delete); ensureOpen(); // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: result = innerDelete(delete); @@ -710,7 +726,7 @@ public class InternalEngine extends Engine { private void maybePruneDeletedTombstones() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: - if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { + if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
index f9926832008..6cbc747b2f9 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
@@ -162,7 +162,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     }
 
     public static class GeoPointFieldType extends MappedFieldType {
-        GeoPointFieldType() {
+        public GeoPointFieldType() {
         }
 
         GeoPointFieldType(GeoPointFieldType ref) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
index 6cac16d2fce..8e18c820b79 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
@@ -23,7 +23,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MultiTermQuery;
@@ -114,12 +114,12 @@ public class IdFieldMapper extends MetadataFieldMapper {
         @Override
         public Query termQuery(Object value, @Nullable QueryShardContext context) {
             final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
-            return new TermsQuery(UidFieldMapper.NAME, uids);
+            return new TermInSetQuery(UidFieldMapper.NAME, uids);
         }
 
         @Override
         public Query termsQuery(List values, @Nullable QueryShardContext context) {
-            return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
+            return new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
index df35253b8ec..a15432c635d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
@@ -124,7 +124,7 @@ public class IpFieldMapper extends FieldMapper {
 
     public static final class IpFieldType extends MappedFieldType {
 
-        IpFieldType() {
+        public IpFieldType() {
             super();
             setTokenized(false);
             setHasDocValues(true);
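These mapper changes follow Lucene's relocation of the terms-set query to org.apache.lucene.search under the name TermInSetQuery. A small sketch of the call shape the new code relies on (the field name here is illustrative):

import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;

// Sketch: a single-field, many-terms disjunction, as termsQuery(...) builds above.
class TermInSetSketch {
    static Query uidsQuery(BytesRef[] uids) {
        // for small term counts this rewrites to a boolean disjunction internally
        return new TermInSetQuery("_uid", uids);
    }
}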
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
index e8a11fc5d47..a7d59fcfb42 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
@@ -22,7 +22,7 @@ package org.elasticsearch.index.mapper;
 import java.util.List;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -53,7 +53,7 @@ public abstract class StringFieldType extends TermBasedFieldType {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
index e1fd56616f3..89b09cc068e 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
@@ -22,9 +22,9 @@ package org.elasticsearch.index.mapper;
 import java.util.List;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -66,7 +66,7 @@ abstract class TermBasedFieldType extends MappedFieldType {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
index 5f5be04a914..c24747e62c8 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
@@ -26,13 +26,13 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lucene.Lucene;
@@ -172,7 +172,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
      * Specialization for a disjunction over many _type
      */
     public static class TypesQuery extends Query {
-        // Same threshold as TermsQuery
+        // Same threshold as TermInSetQuery
         private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
 
         private final BytesRef[] types;
@@ -220,7 +220,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
             }
             return new ConstantScoreQuery(bq.build());
         }
-        return new TermsQuery(CONTENT_TYPE, types);
+        return new TermInSetQuery(CONTENT_TYPE, types);
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
index 630cf2d93b9..5857ef9abf3 100644
--- a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.index.query;
 
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
@@ -175,7 +175,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder {
                 Collections.addAll(typesForQuery, types);
             }
 
-            query = new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
+            query = new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
         }
         return query;
     }
diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
index 9f083eaab09..5df7ace69bb 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -21,10 +21,10 @@ package org.elasticsearch.index.query;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.Fields;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
@@ -1165,7 +1165,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder {
             if (o instanceof BytesRef) {
                 b = (BytesRef) o;
             } else {
-                builder.copyChars(o.toString());
+                builder.copyChars(o.toString());
                 b = builder.get();
             }
             bytesOut.writeBytes(b.bytes, b.offset, b.length);
diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
--- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
@@ -410,7 +410,7 @@ public class TermsQueryBuilder extends AbstractQueryBuilder {
         for (int i = 0; i < filterValues.length; i++) {
             filterValues[i] = BytesRefs.toBytesRef(values.get(i));
         }
-        return new TermsQuery(fieldName, filterValues);
+        return new TermInSetQuery(fieldName, filterValues);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
index c3950e1012a..885fdfc9e65 100644
--- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
+++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
@@ -57,4 +57,52 @@ public class SequenceNumbers {
         return new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
     }
 
+    /**
+     * Compute the minimum of the given current minimum sequence number and the specified sequence number, accounting for the fact that the
+     * current minimum sequence number could be {@link SequenceNumbersService#NO_OPS_PERFORMED} or
+     * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. When the current minimum sequence number is not
+     * {@link SequenceNumbersService#NO_OPS_PERFORMED} nor {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}, the specified sequence number
+     * must not be {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}.
+     *
+     * @param minSeqNo the current minimum sequence number
+     * @param seqNo    the specified sequence number
+     * @return the new minimum sequence number
+     */
+    public static long min(final long minSeqNo, final long seqNo) {
+        if (minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) {
+            return seqNo;
+        } else if (minSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
+            return seqNo;
+        } else {
+            if (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
+                throw new IllegalArgumentException("sequence number must be assigned");
+            }
+            return Math.min(minSeqNo, seqNo);
+        }
+    }
+
+    /**
+     * Compute the maximum of the given current maximum sequence number and the specified sequence number, accounting for the fact that the
+     * current maximum sequence number could be {@link SequenceNumbersService#NO_OPS_PERFORMED} or
+     * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. When the current maximum sequence number is not
+     * {@link SequenceNumbersService#NO_OPS_PERFORMED} nor {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}, the specified sequence number
+     * must not be {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}.
+     *
+     * @param maxSeqNo the current maximum sequence number
+     * @param seqNo    the specified sequence number
+     * @return the new maximum sequence number
+     */
+    public static long max(final long maxSeqNo, final long seqNo) {
+        if (maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) {
+            return seqNo;
+        } else if (maxSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
+            return seqNo;
+        } else {
+            if (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
+                throw new IllegalArgumentException("sequence number must be assigned");
+            }
+            return Math.max(maxSeqNo, seqNo);
+        }
+    }
+
 }
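A usage sketch for the new folding helpers; the sentinel constants are inlined (NO_OPS_PERFORMED and UNASSIGNED_SEQ_NO were -1 and -2 in SequenceNumbersService at this point) so the example stays self-contained:

// Sketch: folding per-operation sequence numbers into a running minimum, treating
// the two sentinels as "no value yet". Constants inlined for self-containment.
class SeqNoFoldSketch {
    static final long NO_OPS_PERFORMED = -1L;
    static final long UNASSIGNED_SEQ_NO = -2L;

    static long min(final long minSeqNo, final long seqNo) {
        if (minSeqNo == NO_OPS_PERFORMED || minSeqNo == UNASSIGNED_SEQ_NO) {
            return seqNo; // the first assigned sequence number becomes the minimum
        }
        if (seqNo == UNASSIGNED_SEQ_NO) {
            throw new IllegalArgumentException("sequence number must be assigned");
        }
        return Math.min(minSeqNo, seqNo);
    }

    public static void main(String[] args) {
        long min = NO_OPS_PERFORMED;
        for (final long seqNo : new long[] {5, 3, 9}) {
            min = min(min, seqNo);
        }
        assert min == 3 : min;
    }
}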
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
index cde14dec173..70e2037664f 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java
@@ -20,9 +20,11 @@ package org.elasticsearch.index.shard;
 
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ContextPreservingActionListener;
 import org.elasticsearch.action.support.ThreadedActionListener;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.Closeable;
@@ -32,6 +34,7 @@ import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
 
 public class IndexShardOperationsLock implements Closeable {
     private final ShardId shardId;
@@ -126,11 +129,13 @@ public class IndexShardOperationsLock implements Closeable {
                 if (delayedOperations == null) {
                     delayedOperations = new ArrayList<>();
                 }
+                final Supplier contextSupplier = threadPool.getThreadContext().newRestorableContext(false);
                 if (executorOnDelay != null) {
                     delayedOperations.add(
-                        new ThreadedActionListener<>(logger, threadPool, executorOnDelay, onAcquired, forceExecution));
+                        new ThreadedActionListener<>(logger, threadPool, executorOnDelay,
+                            new ContextPreservingActionListener<>(contextSupplier, onAcquired), forceExecution));
                 } else {
-                    delayedOperations.add(onAcquired);
+                    delayedOperations.add(new ContextPreservingActionListener<>(contextSupplier, onAcquired));
                 }
                 return;
             }
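The listener wrapping above exists so that whatever thread context the caller had when the operation was delayed is restored when the permit is finally granted. A plain-Java sketch of the capture/restore idea, with a ThreadLocal standing in for ThreadContext (all names illustrative, not the Elasticsearch API):

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.Consumer;

// Sketch: capture the caller's context when the operation is delayed, restore it
// around the callback when the operation eventually runs.
class ContextPreservingSketch {
    static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "empty");
    static final Queue<Runnable> DELAYED = new ArrayDeque<>();

    static void delay(final Consumer<String> onAcquired) {
        final String captured = CONTEXT.get(); // capture now, on the submitting thread
        DELAYED.add(() -> {
            final String previous = CONTEXT.get();
            CONTEXT.set(captured); // restore the captured context for the callback
            try {
                onAcquired.accept(CONTEXT.get());
            } finally {
                CONTEXT.set(previous); // put the worker thread's own context back
            }
        });
    }
}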
diff --git a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
index bf61febb741..6f392c195fd 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.index.translog;
 
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
 
 import java.io.IOException;
@@ -52,7 +51,9 @@ public abstract class BaseTranslogReader implements Comparable
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
     synchronized(this)
     private final Object syncLock = new Object();
 
-    public TranslogWriter(
+    private TranslogWriter(
         final ChannelFactory channelFactory,
         final ShardId shardId,
         final Checkpoint initialCheckpoint,
@@ -80,6 +85,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
         this.outputStream = new BufferedChannelOutputStream(java.nio.channels.Channels.newOutputStream(channel), bufferSize.bytesAsInt());
         this.lastSyncedCheckpoint = initialCheckpoint;
         this.totalOffset = initialCheckpoint.offset;
+        assert initialCheckpoint.minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : initialCheckpoint.minSeqNo;
+        this.minSeqNo = initialCheckpoint.minSeqNo;
+        assert initialCheckpoint.maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : initialCheckpoint.maxSeqNo;
+        this.maxSeqNo = initialCheckpoint.maxSeqNo;
         this.globalCheckpointSupplier = globalCheckpointSupplier;
     }
 
@@ -115,10 +124,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             writeHeader(out, ref);
             channel.force(true);
             final Checkpoint checkpoint =
-                writeCheckpoint(channelFactory, headerLength, 0, globalCheckpointSupplier.getAsLong(), file.getParent(), fileGeneration);
-            final TranslogWriter writer =
-                new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, globalCheckpointSupplier);
-            return writer;
+                Checkpoint.emptyTranslogCheckpoint(headerLength, fileGeneration, globalCheckpointSupplier.getAsLong());
+            writeCheckpoint(channelFactory, file.getParent(), checkpoint);
+            return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, globalCheckpointSupplier);
         } catch (Exception exception) {
             // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
             // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
@@ -151,21 +159,42 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
     /**
      * add the given bytes to the translog and return the location they were written at
     */
-    public synchronized Translog.Location add(BytesReference data) throws IOException {
+
+    /**
+     * Add the given bytes to the translog with the specified sequence number; returns the location the bytes were written to.
+     *
+     * @param data  the bytes to write
+     * @param seqNo the sequence number associated with the operation
+     * @return the location the bytes were written to
+     * @throws IOException if writing to the translog resulted in an I/O exception
+     */
+    public synchronized Translog.Location add(final BytesReference data, final long seqNo) throws IOException {
         ensureOpen();
         final long offset = totalOffset;
         try {
             data.writeTo(outputStream);
-        } catch (Exception ex) {
+        } catch (final Exception ex) {
             try {
                 closeWithTragicEvent(ex);
-            } catch (Exception inner) {
+            } catch (final Exception inner) {
                 ex.addSuppressed(inner);
             }
             throw ex;
         }
         totalOffset += data.length();
+
+        if (minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) {
+            assert operationCounter == 0;
+        }
+        if (maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) {
+            assert operationCounter == 0;
+        }
+
+        minSeqNo = SequenceNumbers.min(minSeqNo, seqNo);
+        maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo);
+
         operationCounter++;
+
         return new Translog.Location(generation, offset, data.length());
     }
 
@@ -191,13 +220,20 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
         return operationCounter;
     }
 
+    @Override
+    Checkpoint getCheckpoint() {
+        return getLastSyncedCheckpoint();
+    }
+
     @Override
     public long sizeInBytes() {
         return totalOffset;
     }
 
     /**
-     * closes this writer and transfers it's underlying file channel to a new immutable reader
+     * Closes this writer and transfers its underlying file channel to a new immutable {@link TranslogReader}
+     * @return a new {@link TranslogReader}
+     * @throws IOException if any of the file operations resulted in an I/O exception
      */
     public TranslogReader closeIntoReader() throws IOException {
         // make sure to acquire the sync lock first, to prevent dead locks with threads calling
@@ -218,18 +254,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             throw e;
         }
         if (closed.compareAndSet(false, true)) {
-            boolean success = false;
-            try {
-                final TranslogReader reader =
-                    new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
-                success = true;
-                return reader;
-            } finally {
-                if (success == false) {
-                    // close the channel, as we are closed and failed to create a new reader
-                    IOUtils.closeWhileHandlingException(channel);
-                }
-            }
+            return new TranslogReader(getLastSyncedCheckpoint(), channel, path, getFirstOperationOffset());
         } else {
             throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
         }
@@ -272,14 +297,18 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             // the lock we should check again since if this code is busy we might have fsynced enough already
             final long offsetToSync;
             final int opsCounter;
-            final long globalCheckpoint;
+            final long currentMinSeqNo;
+            final long currentMaxSeqNo;
+            final long currentGlobalCheckpoint;
             synchronized (this) {
                 ensureOpen();
                 try {
                     outputStream.flush();
                     offsetToSync = totalOffset;
                     opsCounter = operationCounter;
-                    globalCheckpoint = globalCheckpointSupplier.getAsLong();
+                    currentMinSeqNo = minSeqNo;
+                    currentMaxSeqNo = maxSeqNo;
+                    currentGlobalCheckpoint = globalCheckpointSupplier.getAsLong();
                 } catch (Exception ex) {
                     try {
                         closeWithTragicEvent(ex);
@@ -295,7 +324,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             try {
                 channel.force(false);
                 checkpoint =
-                    writeCheckpoint(channelFactory, offsetToSync, opsCounter, globalCheckpoint, path.getParent(), generation);
+                    writeCheckpoint(channelFactory, offsetToSync, opsCounter, currentMinSeqNo, currentMaxSeqNo, currentGlobalCheckpoint, path.getParent(), generation);
             } catch (Exception ex) {
                 try {
                     closeWithTragicEvent(ex);
@@ -333,24 +362,32 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
     }
 
     private static Checkpoint writeCheckpoint(
-        ChannelFactory channelFactory,
-        long syncPosition,
-        int numOperations,
-        long globalCheckpoint,
-        Path translogFile,
-        long generation) throws IOException {
-        final Path checkpointFile = translogFile.resolve(Translog.CHECKPOINT_FILE_NAME);
-        final Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation, globalCheckpoint);
-        Checkpoint.write(channelFactory::open, checkpointFile, checkpoint, StandardOpenOption.WRITE);
+            ChannelFactory channelFactory,
+            long syncPosition,
+            int numOperations,
+            long minSeqNo,
+            long maxSeqNo,
+            long globalCheckpoint,
+            Path translogFile,
+            long generation) throws IOException {
+        final Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation, minSeqNo, maxSeqNo, globalCheckpoint);
+        writeCheckpoint(channelFactory, translogFile, checkpoint);
         return checkpoint;
     }
 
+    private static void writeCheckpoint(
+            final ChannelFactory channelFactory,
+            final Path translogFile,
+            final Checkpoint checkpoint) throws IOException {
+        Checkpoint.write(channelFactory, translogFile.resolve(Translog.CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE);
+    }
+
     /**
      * The last synced checkpoint for this translog.
     *
     * @return the last synced checkpoint
     */
-    public Checkpoint getLastSyncedCheckpoint() {
+    Checkpoint getLastSyncedCheckpoint() {
         return lastSyncedCheckpoint;
     }
 
@@ -402,4 +439,5 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             throw new IllegalStateException("never close this stream");
         }
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
index 40a75c16370..ea1f4c13dfd 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
@@ -168,7 +168,8 @@ public class TruncateTranslogCommand extends EnvironmentAwareCommand {
 
     /** Write a checkpoint file to the given location with the given generation */
     public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException {
-        Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration, SequenceNumbersService.UNASSIGNED_SEQ_NO);
+        Checkpoint emptyCheckpoint =
+            Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration, SequenceNumbersService.UNASSIGNED_SEQ_NO);
         Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, StandardOpenOption.WRITE, StandardOpenOption.READ,
             StandardOpenOption.CREATE_NEW);
         // fsync with metadata here to make sure.
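The reworked sync() snapshots the writer's counters while holding the monitor but performs the fsync and checkpoint write outside it. A condensed sketch of that shape (names illustrative, not the TranslogWriter API):

import java.io.IOException;

// Sketch: snapshot mutable counters under the monitor, then fsync and write the
// checkpoint without blocking concurrent add() calls.
class CheckpointSyncSketch {
    private final Object mutex = new Object();
    private long totalOffset, minSeqNo, maxSeqNo;

    void sync() throws IOException {
        final long offsetToSync, currentMinSeqNo, currentMaxSeqNo;
        synchronized (mutex) {
            offsetToSync = totalOffset;
            currentMinSeqNo = minSeqNo;
            currentMaxSeqNo = maxSeqNo;
        }
        writeCheckpoint(offsetToSync, currentMinSeqNo, currentMaxSeqNo);
    }

    private void writeCheckpoint(long offset, long min, long max) throws IOException {
        // stand-in for channel.force(false) followed by Checkpoint.write(...)
    }
}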
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index 39675ffce82..a4e4c83bc00 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -69,6 +69,7 @@ import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.ShardLock;
 import org.elasticsearch.env.ShardLockObtainFailedException;
@@ -509,7 +510,7 @@ public class IndicesService extends AbstractLifecycleComponent
             client.admin().indices().preparePutMapping()
                 .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid
                 .setType(type)
-                .setSource(mapping.source().string())
+                .setSource(mapping.source().string(), XContentType.JSON)
                 .get();
         } catch (IOException ex) {
             throw new ElasticsearchException("failed to stringify mapping source", ex);
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
index 1aaf3077aea..61950942e60 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
@@ -140,6 +140,7 @@ import org.elasticsearch.index.analysis.UniqueTokenFilterFactory;
 import org.elasticsearch.index.analysis.UpperCaseTokenFilterFactory;
 import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
 import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
+import org.elasticsearch.index.analysis.WordDelimiterGraphTokenFilterFactory;
 import org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory;
 import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
 import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
@@ -225,6 +226,7 @@ public final class AnalysisModule {
         tokenFilters.register("snowball", SnowballTokenFilterFactory::new);
         tokenFilters.register("stemmer", StemmerTokenFilterFactory::new);
         tokenFilters.register("word_delimiter", WordDelimiterTokenFilterFactory::new);
+        tokenFilters.register("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new);
         tokenFilters.register("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
         tokenFilters.register("elision", ElisionTokenFilterFactory::new);
         tokenFilters.register("flatten_graph", FlattenGraphTokenFilterFactory::new);
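word_delimiter_graph is registered alongside the existing word_delimiter filter, and the pre-built variant added just below uses the same flag set. As a reference point, this is how that flag combination applies to an arbitrary token stream (Lucene 6.x API; null means no protected-words set):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;

// Sketch: wrap a token stream with the same flags as the WORD_DELIMITER_GRAPH pre-built filter.
class WordDelimiterGraphSketch {
    static TokenStream wrap(TokenStream in) {
        final int flags = WordDelimiterGraphFilter.GENERATE_WORD_PARTS
            | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS
            | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE
            | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS
            | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE;
        return new WordDelimiterGraphFilter(in, flags, null); // null: no protected-words set
    }
}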
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
index 53e79cb9dfe..6c58ab884db 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
@@ -51,6 +51,7 @@ import org.apache.lucene.analysis.miscellaneous.TrimFilter;
 import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
 import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
@@ -87,6 +88,18 @@ public enum PreBuiltTokenFilters {
         }
     },
 
+    WORD_DELIMITER_GRAPH(CachingStrategy.ONE) {
+        @Override
+        public TokenStream create(TokenStream tokenStream, Version version) {
+            return new WordDelimiterGraphFilter(tokenStream,
+                WordDelimiterGraphFilter.GENERATE_WORD_PARTS |
+                WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS |
+                WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE |
+                WordDelimiterGraphFilter.SPLIT_ON_NUMERICS |
+                WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null);
+        }
+    },
+
     STOP(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
index 33cb70a0d0b..7bb1e51cd23 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
@@ -137,7 +137,7 @@ public class FsInfo implements Iterable, Writeable, ToXContent {
         }
 
         public void add(Path path) {
-            total = addLong(total, path.total);
+            total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total));
             free = addLong(free, path.free);
             available = addLong(available, path.available);
             if (path.spins != null && path.spins.booleanValue()) {
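The FsInfo change routes summed path totals through the same overflow guard FsProbe now exposes package-privately. The idea as a self-contained sketch:

// Sketch of the overflow guard: a saturating add for byte counters whose sum may
// exceed Long.MAX_VALUE; a negative result means the addition wrapped around.
class HugeFsSketch {
    static long adjustForHugeFilesystems(long bytes) {
        return bytes < 0 ? Long.MAX_VALUE : bytes;
    }

    static long saturatingAdd(long current, long addition) {
        return adjustForHugeFilesystems(current + addition);
    }

    public static void main(String[] args) {
        assert saturatingAdd(Long.MAX_VALUE, 1) == Long.MAX_VALUE;
    }
}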
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
index d079a720168..1fdae49a6f1 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
@@ -136,7 +136,11 @@ public class FsProbe extends AbstractComponent {
     }
 
     /* See: https://bugs.openjdk.java.net/browse/JDK-8162520 */
-    private static long adjustForHugeFilesystems(long bytes) {
+    /**
+     * Take a large value intended to be positive, and if it has overflowed,
+     * return {@code Long.MAX_VALUE} instead of a negative number.
+     */
+    static long adjustForHugeFilesystems(long bytes) {
         if (bytes < 0) {
             return Long.MAX_VALUE;
         }
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
index 0242e712cce..a2cf1891bad 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
@@ -188,7 +188,7 @@ public class OsProbe {
     }
 
     // pattern for lines in /proc/self/cgroup
-    private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");
+    private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:]+):(/.*)");
 
     // this property is to support a hack to workaround an issue with Docker containers mounting the cgroups hierarchy inconsistently with
     // respect to /proc/self/cgroup; for Docker containers this should be set to "/"
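The relaxed cgroup pattern accepts any number of comma-joined controller names rather than at most two. A quick check against a Docker-style line (the sample line is made up):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: the new pattern matches /proc/self/cgroup lines with arbitrarily many
// comma-separated controllers in the middle field.
class CgroupPatternSketch {
    private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:]+):(/.*)");

    public static void main(String[] args) {
        Matcher m = CONTROL_GROUP_PATTERN.matcher("11:cpu,cpuacct,cpuset:/docker/abc");
        if (m.matches()) {
            System.out.println(m.group(1) + " -> " + m.group(2)); // cpu,cpuacct,cpuset -> /docker/abc
        }
    }
}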
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index bcf288dab27..b1fdc20117b 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -733,22 +733,24 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
      */
     long latestIndexBlobId() throws IOException {
         try {
-            // first, try reading the latest index generation from the index.latest blob
-            return readSnapshotIndexLatestBlob();
-        } catch (IOException ioe) {
-            // we could not find the index.latest blob, this can happen in two scenarios:
-            // (1) its an empty repository
-            // (2) when writing the index-latest blob, if the blob already exists,
-            //     we first delete it, then atomically write the new blob. there is
-            //     a small window in time when the blob is deleted and the new one
-            //     written - if the node crashes during that time, we won't have an
-            //     index-latest blob
-            // lets try to list all index-N blobs to determine the last one, if listing the blobs
-            // is not a supported operation (which is the case for read-only repositories), then
-            // assume its an empty repository.
+            // First, try listing all index-N blobs (there should only be two index-N blobs at any given
+            // time in a repository if cleanup is happening properly) and pick the index-N blob with the
+            // highest N value - this will be the latest index blob for the repository. Note, we do this
+            // instead of directly reading the index.latest blob to get the current index-N blob because
+            // index.latest is not written atomically and is not immutable - on every index-N change,
+            // we first delete the old index.latest and then write the new one. If the repository is not
+            // read-only, it is possible that we try deleting the index.latest blob while it is being read
+            // by some other operation (such as the get snapshots operation). In some file systems, it is
+            // illegal to delete a file while it is being read elsewhere (e.g. Windows). For read-only
+            // repositories, we read index.latest, both because listing blob prefixes is often unsupported
+            // and because the index.latest blob will never be deleted and re-written.
+            return listBlobsToGetLatestIndexId();
+        } catch (UnsupportedOperationException e) {
+            // If it's a read-only repository, listing blobs by prefix may not be supported (e.g. a URL repository);
+            // in this case, try reading the latest index generation from the index.latest blob
             try {
-                return listBlobsToGetLatestIndexId();
-            } catch (UnsupportedOperationException uoe) {
+                return readSnapshotIndexLatestBlob();
+            } catch (NoSuchFileException nsfe) {
                 return RepositoryData.EMPTY_REPO_GEN;
             }
         }
@@ -765,7 +767,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     private long listBlobsToGetLatestIndexId() throws IOException {
         Map blobs = snapshotsBlobContainer.listBlobsByPrefix(INDEX_FILE_PREFIX);
-        long latest = -1;
+        long latest = RepositoryData.EMPTY_REPO_GEN;
         if (blobs.isEmpty()) {
             // no snapshot index blobs have been written yet
             return latest;
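The inverted lookup order (list index-N blobs first, then fall back to index.latest) avoids reading a blob that may be deleted mid-write. A sketch of the pick-highest-N selection (EMPTY_REPO_GEN mirrors RepositoryData.EMPTY_REPO_GEN, -1 at the time; blob-name parsing simplified):

import java.util.Map;

// Sketch: choose the highest N among blobs named index-0, index-1, ...; `blobs`
// stands in for the result of listBlobsByPrefix("index-").
class LatestIndexBlobSketch {
    static final long EMPTY_REPO_GEN = -1L;

    static long latestGeneration(Map<String, ?> blobs) {
        long latest = EMPTY_REPO_GEN;
        for (String name : blobs.keySet()) {
            try {
                latest = Math.max(latest, Long.parseLong(name.substring("index-".length())));
            } catch (NumberFormatException e) {
                // ignore blobs that do not follow the index-N naming scheme
            }
        }
        return latest;
    }
}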
If content type is not required then this method - * returns true unless a content type could not be inferred from the body and the rest handler does not support plain text + * {@link XContentType} or the handler supports a content stream and the content type header is for newline delimited JSON, */ - private boolean hasContentTypeOrCanAutoDetect(final RestRequest restRequest, final RestHandler restHandler) { + private boolean hasContentType(final RestRequest restRequest, final RestHandler restHandler) { if (restRequest.getXContentType() == null) { - if (restHandler != null && restHandler.supportsPlainText()) { - // content type of null with a handler that supports plain text gets through for now. Once we remove plain text this can - // be removed! - deprecationLogger.deprecated("Plain text request bodies are deprecated. Use request parameters or body " + - "in a supported format."); - } else if (restHandler != null && restHandler.supportsContentStream() && restRequest.header("Content-Type") != null) { + if (restHandler != null && restHandler.supportsContentStream() && restRequest.header("Content-Type") != null) { final String lowercaseMediaType = restRequest.header("Content-Type").toLowerCase(Locale.ROOT); // we also support newline delimited JSON: http://specs.okfnlabs.org/ndjson/ if (lowercaseMediaType.equals("application/x-ndjson")) { restRequest.setXContentType(XContentType.JSON); - } else if (isContentTypeRequired) { - return false; - } else { - return autoDetectXContentType(restRequest); + return true; } - } else if (isContentTypeRequired) { - return false; - } else { - return autoDetectXContentType(restRequest); } - } - return true; - } - - private boolean autoDetectXContentType(RestRequest restRequest) { - deprecationLogger.deprecated("Content type detection for rest requests is deprecated. Specify the content type using " + - "the [Content-Type] header."); - XContentType xContentType = XContentFactory.xContentType(restRequest.content()); - if (xContentType == null) { return false; - } else { - restRequest.setXContentType(xContentType); } return true; } diff --git a/core/src/main/java/org/elasticsearch/rest/RestHandler.java b/core/src/main/java/org/elasticsearch/rest/RestHandler.java index 215541b40e8..1ebc7a7fd1b 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/RestHandler.java @@ -39,15 +39,6 @@ public interface RestHandler { return true; } - /** - * Indicates if a RestHandler supports plain text bodies - * @deprecated use request parameters or bodies that can be parsed with XContent! - */ - @Deprecated - default boolean supportsPlainText() { - return false; - } - /** * Indicates if the RestHandler supports content as a stream. A stream would be multiple objects delineated by * {@link XContent#streamSeparator()}. 
diff --git a/core/src/main/java/org/elasticsearch/rest/RestHandler.java b/core/src/main/java/org/elasticsearch/rest/RestHandler.java
index 215541b40e8..1ebc7a7fd1b 100644
--- a/core/src/main/java/org/elasticsearch/rest/RestHandler.java
+++ b/core/src/main/java/org/elasticsearch/rest/RestHandler.java
@@ -39,15 +39,6 @@ public interface RestHandler {
         return true;
     }
 
-    /**
-     * Indicates if a RestHandler supports plain text bodies
-     * @deprecated use request parameters or bodies that can be parsed with XContent!
-     */
-    @Deprecated
-    default boolean supportsPlainText() {
-        return false;
-    }
-
     /**
      * Indicates if the RestHandler supports content as a stream. A stream would be multiple objects delineated by
      * {@link XContent#streamSeparator()}. If a handler returns true this will affect the types of content that can be sent to
diff --git a/core/src/main/java/org/elasticsearch/rest/RestRequest.java b/core/src/main/java/org/elasticsearch/rest/RestRequest.java
index 9aea6d213f7..509bfa7a3c0 100644
--- a/core/src/main/java/org/elasticsearch/rest/RestRequest.java
+++ b/core/src/main/java/org/elasticsearch/rest/RestRequest.java
@@ -28,13 +28,10 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 
@@ -54,7 +51,6 @@ import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
 
 public abstract class RestRequest implements ToXContent.Params {
 
-    private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestRequest.class));
 
     // tchar pattern as defined by RFC7230 section 3.2.6
     private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+");
@@ -407,66 +403,17 @@ public abstract class RestRequest implements ToXContent.Params {
         String source = param("source");
         String typeParam = param("source_content_type");
-        if (source != null) {
+        if (source != null && typeParam != null) {
             BytesArray bytes = new BytesArray(source);
-            final XContentType xContentType;
-            if (typeParam != null) {
-                xContentType = parseContentType(Collections.singletonList(typeParam));
-            } else {
-                DEPRECATION_LOGGER.deprecated("Deprecated use of the [source] parameter without the [source_content_type] parameter. Use " +
-                    "the [source_content_type] parameter to specify the content type of the source such as [application/json]");
-                xContentType = XContentFactory.xContentType(bytes);
-            }
-
+            final XContentType xContentType = parseContentType(Collections.singletonList(typeParam));
             if (xContentType == null) {
-                throw new IllegalStateException("could not determine source content type");
+                throw new IllegalStateException("Unknown value for source_content_type [" + typeParam + "]");
             }
             return new Tuple<>(xContentType, bytes);
         }
         return new Tuple<>(XContentType.JSON, BytesArray.EMPTY);
     }
 
-    /**
-     * Call a consumer with the parser for the contents of this request if it has contents, otherwise with a parser for the {@code source}
-     * parameter if there is one, otherwise with {@code null}. Use {@link #contentOrSourceParamParser()} if you should throw an exception
-     * back to the user when there isn't request content. This version allows for plain text content
-     */
-    @Deprecated
-    public final void withContentOrSourceParamParserOrNullLenient(CheckedConsumer withParser)
-            throws IOException {
-        if (hasContent() && xContentType.get() == null) {
-            withParser.accept(null);
-        }
-
-        Tuple tuple = contentOrSourceParam();
-        BytesReference content = tuple.v2();
-        XContentType xContentType = tuple.v1();
-        if (content.length() > 0) {
-            try (XContentParser parser = xContentType.xContent().createParser(xContentRegistry, content)) {
-                withParser.accept(parser);
-            }
-        } else {
-            withParser.accept(null);
-        }
-    }
-
-    /**
-     * Get the content of the request or the contents of the {@code source} param without the xcontent type. This is useful the request can
-     * accept non xcontent values.
-     * @deprecated we should only take xcontent
-     */
-    @Deprecated
-    public final BytesReference getContentOrSourceParamOnly() {
-        if (hasContent()) {
-            return content();
-        }
-        String source = param("source");
-        if (source != null) {
-            return new BytesArray(source);
-        }
-        return BytesArray.EMPTY;
-    }
-
     /**
      * Parses the given content type string for the media type. This method currently ignores parameters.
      */
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
index 47037460e2b..9a168e84dd6 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
@@ -24,30 +24,20 @@ import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.client.Requests.updateSettingsRequest;
-import static org.elasticsearch.common.util.set.Sets.newHashSet;
 
 public class RestUpdateSettingsAction extends BaseRestHandler {
-    private static final Set VALUES_TO_EXCLUDE = unmodifiableSet(newHashSet(
-        "error_trace",
-        "pretty",
-        "timeout",
-        "master_timeout",
-        "index",
-        "preserve_existing",
-        "expand_wildcards",
-        "ignore_unavailable",
-        "allow_no_indices"));
 
     public RestUpdateSettingsAction(Settings settings, RestController controller) {
         super(settings);
@@ -63,29 +53,22 @@ public class RestUpdateSettingsAction extends BaseRestHandler {
         updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout()));
         updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions()));
 
-        Settings.Builder updateSettings = Settings.builder();
-        String bodySettingsStr = request.content().utf8ToString();
-        if (Strings.hasText(bodySettingsStr)) {
-            Settings buildSettings = Settings.builder()
-                .loadFromSource(bodySettingsStr, request.getXContentType())
-                .build();
-            for (Map.Entry entry : buildSettings.getAsMap().entrySet()) {
-                String key = entry.getKey();
-                String value = entry.getValue();
+        Map settings = new HashMap<>();
+        if (request.hasContent()) {
+            try (XContentParser parser = request.contentParser()) {
+                Map bodySettings = parser.map();
+                Object innerBodySettings = bodySettings.get("settings");
                 // clean up in case the body is wrapped with "settings" : { ... }
-                if (key.startsWith("settings.")) {
-                    key = key.substring("settings.".length());
+                if (innerBodySettings instanceof Map) {
+                    @SuppressWarnings("unchecked")
+                    Map innerBodySettingsMap = (Map) innerBodySettings;
+                    settings.putAll(innerBodySettingsMap);
+                } else {
+                    settings.putAll(bodySettings);
                 }
-                updateSettings.put(key, value);
             }
         }
-        for (Map.Entry entry : request.params().entrySet()) {
-            if (VALUES_TO_EXCLUDE.contains(entry.getKey())) {
-                continue;
-            }
-            updateSettings.put(entry.getKey(), entry.getValue());
-        }
-        updateSettingsRequest.settings(updateSettings);
+        updateSettingsRequest.settings(settings);
 
         return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel));
     }
@@ -94,5 +77,4 @@ public class RestUpdateSettingsAction extends BaseRestHandler {
     protected Set responseParams() {
         return Settings.FORMAT_PARAMS;
     }
-
 }
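The body handling above accepts either a bare settings map or one wrapped in a top-level "settings" object. A self-contained sketch of that unwrapping:

import java.util.HashMap;
import java.util.Map;

// Sketch: unwrap an optional top-level "settings" object, otherwise take the body as-is.
class SettingsBodySketch {
    static Map<String, Object> unwrap(Map<String, Object> body) {
        final Object inner = body.get("settings");
        final Map<String, Object> settings = new HashMap<>();
        if (inner instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, Object> innerMap = (Map<String, Object>) inner;
            settings.putAll(innerMap);
        } else {
            settings.putAll(body);
        }
        return settings;
    }
}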
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
index 111e190638d..76e5c40cf42 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
@@ -60,7 +60,7 @@ public class RestThreadPoolAction extends AbstractCatAction {
     @Override
     protected void documentation(StringBuilder sb) {
         sb.append("/_cat/thread_pool\n");
-        sb.append("/_cat/thread_pool/{thread_pools}");
+        sb.append("/_cat/thread_pool/{thread_pools}\n");
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 715d90b30c8..d6af84d9472 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -19,9 +19,7 @@
 
 package org.elasticsearch.rest.action.document;
 
-import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.Requests;
@@ -30,20 +28,16 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.RestResponse;
-import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.rest.action.RestStatusToXContentListener;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
 import java.io.IOException;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestRequest.Method.PUT;
-import static org.elasticsearch.rest.RestStatus.OK;
 
 /**
  *
@@ -95,36 +89,11 @@ public class RestBulkAction extends BaseRestHandler {
         bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields,
             defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());
 
-        return channel -> client.bulk(bulkRequest, new RestBuilderListener(channel) {
-            @Override
-            public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                builder.field(Fields.TOOK, response.getTookInMillis());
-                if (response.getIngestTookInMillis() != BulkResponse.NO_INGEST_TOOK) {
-                    builder.field(Fields.INGEST_TOOK, response.getIngestTookInMillis());
-                }
-                builder.field(Fields.ERRORS, response.hasFailures());
-                builder.startArray(Fields.ITEMS);
-                for (BulkItemResponse itemResponse : response) {
-                    itemResponse.toXContent(builder, request);
-                }
-                builder.endArray();
-
-                builder.endObject();
-                return new BytesRestResponse(OK, builder);
-            }
-        });
+        return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel));
     }
 
     @Override
     public boolean supportsContentStream() {
         return true;
     }
-
-    static final class Fields {
-        static final String ITEMS = "items";
-        static final String ERRORS = "errors";
-        static final String TOOK = "took";
-        static final String INGEST_TOOK = "ingest_took";
-    }
 }
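The bespoke RestBuilderListener could be deleted because the bulk response can render itself and report its own REST status, so one generic listener suffices. A sketch of the idea with simplified stand-in interfaces (not the Elasticsearch types):

// Sketch: a response that knows its own status and serialization needs no bespoke
// listener; the generic listener just forwards both to the channel.
interface StatusToXContentSketch {
    int status();
    String toXContent();
}

class StatusListenerSketch<R extends StatusToXContentSketch> {
    void onResponse(R response) {
        send(response.status(), response.toXContent());
    }

    void send(int status, String body) {
        // stand-in for writing to the REST channel
    }
}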
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
index cd1084e4d26..832228bcd8a 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
@@ -44,7 +44,7 @@ public class RestDeleteAction extends BaseRestHandler {
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id"));
         deleteRequest.routing(request.param("routing"));
-        deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+        deleteRequest.parent(request.param("parent"));
         deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT));
         deleteRequest.setRefreshPolicy(request.param("refresh"));
         deleteRequest.version(RestActions.parseVersion(request));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
index c1a7c586809..e6a56f2c429 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
@@ -53,7 +53,7 @@ public class RestGetAction extends BaseRestHandler {
         final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
         getRequest.operationThreaded(true);
         getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
-        getRequest.routing(request.param("routing"));  // order is important, set it after routing, so it will set the routing
+        getRequest.routing(request.param("routing"));
         getRequest.parent(request.param("parent"));
         getRequest.preference(request.param("preference"));
         getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
index c7bea00cf0c..a32848841a4 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
@@ -62,7 +62,7 @@ public class RestIndexAction extends BaseRestHandler {
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"));
         indexRequest.routing(request.param("routing"));
-        indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+        indexRequest.parent(request.param("parent"));
         indexRequest.setPipeline(request.param("pipeline"));
         indexRequest.source(request.content(), request.getXContentType());
         indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
index 1b21f6c710c..10a02d75bfd 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
@@ -83,7 +83,7 @@ public class RestUpdateAction extends BaseRestHandler {
             IndexRequest upsertRequest = updateRequest.upsertRequest();
             if (upsertRequest != null) {
                 upsertRequest.routing(request.param("routing"));
-                upsertRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
+                upsertRequest.parent(request.param("parent"));
                 upsertRequest.version(RestActions.parseVersion(request));
                 upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType()));
             }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
index 47252f5a102..5f39db3a357 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
@@ -22,7 +22,6 @@ package org.elasticsearch.rest.action.search;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -48,15 +47,8 @@ public class RestClearScrollAction extends BaseRestHandler {
         String scrollIds = request.param("scroll_id");
         ClearScrollRequest clearRequest = new ClearScrollRequest();
         clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds)));
-        request.withContentOrSourceParamParserOrNullLenient((xContentParser -> {
-            if (xContentParser == null) {
-                if (request.hasContent()) {
-                    // TODO: why do we accept this plain text value? maybe we can just use the scroll params?
-                    BytesReference body = request.content();
-                    String bodyScrollIds = body.utf8ToString();
-                    clearRequest.setScrollIds(Arrays.asList(splitScrollIds(bodyScrollIds)));
-                }
-            } else {
+        request.withContentOrSourceParamParserOrNull((xContentParser -> {
+            if (xContentParser != null) {
                // NOTE: if a rest request with an xcontent body has request parameters, these parameters do not override xcontent values
                 clearRequest.setScrollIds(null);
                 try {
@@ -70,11 +62,6 @@ public class RestClearScrollAction extends BaseRestHandler {
         return channel -> client.clearScroll(clearRequest, new RestStatusToXContentListener<>(channel));
     }
 
-    @Override
-    public boolean supportsPlainText() {
-        return true;
-    }
-
     private static String[] splitScrollIds(String scrollIds) {
         if (scrollIds == null) {
             return Strings.EMPTY_ARRAY;
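Note: with supportsPlainText() removed, a raw scroll id string as the request body is no longer accepted here. The remaining forms (ids elided) are the scroll_id request parameter, split by splitScrollIds as before, or a JSON body of the form { "scroll_id" : ["..."] } sent to DELETE /_search/scroll, which is parsed through the xcontent path above.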
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index bf8308202b7..89e2f23861c 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.rest.action.search;
 
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
@@ -94,6 +93,9 @@ public class RestSearchAction extends BaseRestHandler {
             searchRequest.source().parseXContent(context);
         }
 
+        final int batchedReduceSize = request.paramAsInt("batched_reduce_size", searchRequest.getBatchedReduceSize());
+        searchRequest.setBatchedReduceSize(batchedReduceSize);
+
         // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
         // from the REST layer. these modes are an internal optimization and should
         // not be specified explicitly by the user.
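For orientation, a minimal Java-side sketch of the new parameter (index name and value illustrative; setBatchedReduceSize is the setter this REST parameter maps onto):

    SearchRequest request = new SearchRequest("idx");
    // buffer at most 64 shard results on the coordinating node before an incremental reduce
    request.setBatchedReduceSize(64);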
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
index 2a60fc6317a..feba6640b65 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
@@ -21,7 +21,6 @@ package org.elasticsearch.rest.action.search;
 
 import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -58,32 +57,17 @@ public class RestSearchScrollAction extends BaseRestHandler {
         }
 
         request.withContentOrSourceParamParserOrNull(xContentParser -> {
-            if (xContentParser == null) {
-                if (request.hasContent()) {
-                    // TODO: why do we accept this plain text value? maybe we can just use the scroll params?
-                    BytesReference body = request.getContentOrSourceParamOnly();
-                    if (scrollId == null) {
-                        String bodyScrollId = body.utf8ToString();
-                        searchScrollRequest.scrollId(bodyScrollId);
-                    }
-                }
-            } else {
+            if (xContentParser != null) {
                // NOTE: if a rest request with an xcontent body has request parameters, these parameters override xcontent values
                 try {
                     buildFromContent(xContentParser, searchScrollRequest);
                 } catch (IOException e) {
                     throw new IllegalArgumentException("Failed to parse request body", e);
                 }
             }
         });
         return channel -> client.searchScroll(searchScrollRequest, new RestStatusToXContentListener<>(channel));
     }
 
-    @Override
-    public boolean supportsPlainText() {
-        return true;
-    }
-
     public static void buildFromContent(XContentParser parser, SearchScrollRequest searchScrollRequest) throws IOException {
         if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
             throw new IllegalArgumentException("Malformed content, must start with an object");
diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java
index 4d807885d7a..ad4d4014675 100644
--- a/core/src/main/java/org/elasticsearch/script/Script.java
+++ b/core/src/main/java/org/elasticsearch/script/Script.java
@@ -169,9 +169,10 @@ public final class Script implements ToXContentObject, Writeable {
                 type = ScriptType.INLINE;
 
                 if (parser.currentToken() == Token.START_OBJECT) {
-                    XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
-                    idOrCode = builder.copyCurrentStructure(parser).bytes().utf8ToString();
-                    options.put(CONTENT_TYPE_OPTION, parser.contentType().mediaType());
+                    // this is really for search templates, which need to be converted to JSON format
+                    XContentBuilder builder = XContentFactory.jsonBuilder();
+                    idOrCode = builder.copyCurrentStructure(parser).string();
+                    options.put(CONTENT_TYPE_OPTION, XContentType.JSON.mediaType());
                 } else {
                     idOrCode = parser.text();
                 }
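A hedged note on the observable effect of the hunk above: an inline object script (i.e. a search template) is re-serialized with jsonBuilder(), so its stored content type is pinned to JSON no matter which xcontent type the request arrived in. Assuming a parsed Script instance named script:

    script.getOptions().get(Script.CONTENT_TYPE_OPTION); // "application/json", i.e. XContentType.JSON.mediaType()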
diff --git a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
index e6c5b09362c..11b78213908 100644
--- a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
+++ b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
@@ -37,7 +37,6 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -107,9 +106,10 @@ public class StoredScriptSource extends AbstractDiffable imp
         private void setCode(XContentParser parser) {
             try {
                 if (parser.currentToken() == Token.START_OBJECT) {
-                    XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
-                    code = builder.copyCurrentStructure(parser).bytes().utf8ToString();
-                    options.put(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType());
+                    // this is really for search templates, which need to be converted to JSON format
+                    XContentBuilder builder = XContentFactory.jsonBuilder();
+                    code = builder.copyCurrentStructure(parser).string();
+                    options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType());
                 } else {
                     code = parser.text();
                 }
@@ -263,11 +263,11 @@ public class StoredScriptSource extends AbstractDiffable imp
                     if (lang == null) {
                         return PARSER.apply(parser, null).build();
                     } else {
-                        try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) {
+                        // this is really for search templates, which need to be converted to JSON format
+                        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                             builder.copyCurrentStructure(parser);
-
                             return new StoredScriptSource(lang, builder.string(),
-                                Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                                Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                         }
                     }
 
@@ -284,11 +284,11 @@ public class StoredScriptSource extends AbstractDiffable imp
 
                     if (token == Token.VALUE_STRING) {
                         return new StoredScriptSource(lang, parser.text(),
-                            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                     }
                 }
 
-                try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) {
+                try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                     if (token != Token.START_OBJECT) {
                         builder.startObject();
                         builder.copyCurrentStructure(parser);
@@ -298,7 +298,7 @@ public class StoredScriptSource extends AbstractDiffable imp
                     }
 
                     return new StoredScriptSource(lang, builder.string(),
-                        Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                        Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                 }
             }
         } catch (IOException ioe) {
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 9044db37a33..3d093e5ae72 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -561,7 +561,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
     }
 
     private void contextProcessedSuccessfully(SearchContext context) {
-        context.accessed(threadPool.estimatedTimeInMillis());
+        context.accessed(threadPool.relativeTimeInMillis());
     }
 
     private void cleanContext(SearchContext context) {
@@ -794,7 +794,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
     class Reaper implements Runnable {
         @Override
         public void run() {
-            final long time = threadPool.estimatedTimeInMillis();
+            final long time = threadPool.relativeTimeInMillis();
             for (SearchContext context : activeContexts.values()) {
                 // Use the same value for both checks since lastAccessTime can
                 // be modified by another thread between checks!
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
index 6af896426a7..563a958109b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
@@ -47,10 +47,21 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na
 
         private final BigArrays bigArrays;
         private final ScriptService scriptService;
+        private final boolean isFinalReduce;
 
-        public ReduceContext(BigArrays bigArrays, ScriptService scriptService) {
+        public ReduceContext(BigArrays bigArrays, ScriptService scriptService, boolean isFinalReduce) {
             this.bigArrays = bigArrays;
             this.scriptService = scriptService;
+            this.isFinalReduce = isFinalReduce;
+        }
+
+        /**
+         * Returns true iff the current reduce phase is the final reduce phase. This indicates whether operations like
+         * pipeline aggregations should be applied and whether features like minDocCount should be enforced.
+         * Operations that potentially lose information may only be applied during the final reduce phase.
+         */
+        public boolean isFinalReduce() {
+            return isFinalReduce;
         }
 
         public BigArrays bigArrays() {
@@ -111,8 +122,10 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na
      */
     public final InternalAggregation reduce(List aggregations, ReduceContext reduceContext) {
         InternalAggregation aggResult = doReduce(aggregations, reduceContext);
-        for (PipelineAggregator pipelineAggregator : pipelineAggregators) {
-            aggResult = pipelineAggregator.reduce(aggResult, reduceContext);
+        if (reduceContext.isFinalReduce()) {
+            for (PipelineAggregator pipelineAggregator : pipelineAggregators) {
+                aggResult = pipelineAggregator.reduce(aggResult, reduceContext);
+            }
         }
         return aggResult;
     }
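For orientation, a minimal sketch of how the flag is intended to be driven (bigArrays and scriptService assumed in scope; the real construction sites are in the search phase code, not this file):

    // incremental (partial) reduce: keep everything, defer pipeline aggs and minDocCount filtering
    InternalAggregation.ReduceContext partial = new InternalAggregation.ReduceContext(bigArrays, scriptService, false);
    // final reduce: pipeline aggregators run and lossy operations are applied
    InternalAggregation.ReduceContext finalReduce = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);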
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
index e8b04680064..2da4ae7fe33 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * A base class for all the single bucket aggregations.
@@ -80,7 +81,7 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio
     /**
      * Create a new copy of this {@link Aggregation} with the same settings as
      * this {@link Aggregation} and contains the provided sub-aggregations.
-     * 
+     *
      * @param subAggregations
      *            the buckets to use in the new {@link Aggregation}
      * @return the new {@link Aggregation}
@@ -133,4 +134,16 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio
         aggregations.toXContentInternal(builder, params);
         return builder;
     }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalSingleBucketAggregation other = (InternalSingleBucketAggregation) obj;
+        return Objects.equals(docCount, other.docCount) &&
+                Objects.equals(aggregations, other.aggregations);
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(docCount, aggregations);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
index 085f18c0e1e..ef268f8a504 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
@@ -192,7 +192,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation
         for (LongObjectPagedHashMap.Cursor> cursor : buckets) {
             List sameCellBuckets = cursor.value;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
index f24fc5c127e..a8976aaa1ac 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@@ -285,7 +285,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
                 if (top.current.key != key) {
                     // the key changes, reduce what we already buffered and reset the buffer for current buckets
                     final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                    if (reduced.getDocCount() >= minDocCount) {
+                    if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                         reducedBuckets.add(reduced);
                     }
                     currentBuckets.clear();
@@ -306,7 +306,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
 
             if (currentBuckets.isEmpty() == false) {
                 final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                if (reduced.getDocCount() >= minDocCount) {
+                if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                     reducedBuckets.add(reduced);
                 }
             }
@@ -382,7 +382,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
             addEmptyBuckets(reducedBuckets, reduceContext);
         }
 
-        if (order == InternalOrder.KEY_ASC) {
+        if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) {
             // nothing to do, data are already sorted since shards return
             // sorted buckets and the merge-sort performed by reduceBuckets
             // maintains order
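Worked example for the relaxed checks above: with minDocCount = 5, one batch may reduce a key down to docCount 3 while a later batch holds 4 more documents for the same key. Filtering at the partial reduce would drop a bucket whose final count, 3 + 4 = 7, clears the threshold, so the minDocCount filter (and likewise the key sort) is only enforced when isFinalReduce() is true.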
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index eb90dfae732..e6e23d3a615 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -308,7 +308,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation
-                    if (reduced.getDocCount() >= minDocCount) {
+                    if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                         reducedBuckets.add(reduced);
                     }
                     currentBuckets.clear();
@@ -329,7 +329,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation
-                if (reduced.getDocCount() >= minDocCount) {
+                if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                     reducedBuckets.add(reduced);
                 }
             }
@@ -400,7 +400,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
-    InternalSampler(String name, long docCount, InternalAggregations subAggregations, List pipelineAggregators, Map metaData) {
+
+    InternalSampler(String name, long docCount, InternalAggregations subAggregations, List pipelineAggregators,
+            Map metaData) {
         super(name, docCount, subAggregations, pipelineAggregators, metaData);
     }
 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
index ca93b3603a2..ee01260acb3 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
@@ -125,8 +125,8 @@ public class SamplerAggregator extends SingleBucketAggregator {
             this.parseField = parseField;
         }
 
-        abstract Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue, ValuesSource valuesSource,
-                SearchContext context, Aggregator parent, List pipelineAggregators,
+        abstract Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue,
+                ValuesSource valuesSource, SearchContext context, Aggregator parent, List pipelineAggregators,
                 Map metaData) throws IOException;
 
         abstract boolean needsGlobalOrdinals();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
index cdd1f8d19a7..6fcee8e937e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
@@ -196,15 +196,14 @@ public abstract class InternalSignificantTerms
         BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size);
         for (Map.Entry> entry : buckets.entrySet()) {
             List sameTermBuckets = entry.getValue();
             final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
             b.updateScore(heuristic);
-            if ((b.score > 0) && (b.subsetDf >= minDocCount)) {
+            if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) {
                 ordered.insertWithOverflow(b);
             }
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
index 1e7be237f88..86bccbed575 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * Result of the {@link TermsAggregator} when the field is some kind of decimal number like a float, double, or distance.
@@ -99,6 +100,16 @@ public class DoubleTerms extends InternalMappedTerms
     public DoubleTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators,
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java
index e3f842a08de..e784d9bc720 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java
@@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
@@ -110,4 +111,20 @@ public abstract class InternalMappedTerms, B exten
         }
         return bucketMap.get(term);
     }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalMappedTerms that = (InternalMappedTerms) obj;
+        return super.doEquals(obj)
+                && Objects.equals(buckets, that.buckets)
+                && Objects.equals(format, that.format)
+                && Objects.equals(otherDocCount, that.otherDocCount)
+                && Objects.equals(showTermDocCountError, that.showTermDocCountError)
+                && Objects.equals(shardSize, that.shardSize);
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(super.doHashCode(), buckets, format, otherDocCount, showTermDocCountError, shardSize);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index a8b4c44ce46..938b20d9fc8 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -33,9 +33,11 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 import static java.util.Collections.unmodifiableList;
 
@@ -135,6 +137,25 @@ public abstract class InternalTerms, B extends Int
             InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
             return newBucket(docCount, aggs, docCountError);
         }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null || getClass() != obj.getClass()) {
+                return false;
+            }
+            Bucket that = (Bucket) obj;
+            // No need to take format and showDocCountError, they are attributes
+            // of the parent terms aggregation object that are only copied here
+            // for serialization purposes
+            return Objects.equals(docCount, that.docCount)
+                    && Objects.equals(docCountError, that.docCountError)
+                    && Objects.equals(aggregations, that.aggregations);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(getClass(), docCount, docCountError, aggregations);
+        }
     }
 
     protected final Terms.Order order;
@@ -228,8 +249,8 @@ public abstract class InternalTerms, B extends Int
             }
         }
 
-        final int size = Math.min(requiredSize, buckets.size());
-        BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(null));
+        final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size());
+        final BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(null));
         for (List sameTermBuckets : buckets.values()) {
             final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
             if (b.docCountError != -1) {
@@ -239,7 +260,7 @@ public abstract class InternalTerms, B extends Int
                     b.docCountError = sumDocCountError - b.docCountError;
                 }
             }
-            if (b.docCount >= minDocCount) {
+            if (b.docCount >= minDocCount || reduceContext.isFinalReduce() == false) {
                 B removed = ordered.insertWithOverflow(b);
                 if (removed != null) {
                     otherDocCount += removed.getDocCount();
@@ -269,4 +290,17 @@ public abstract class InternalTerms, B extends Int
      * Create an array to hold some buckets. Used in collecting the results.
      */
     protected abstract B[] createBucketsArray(int size);
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalTerms that = (InternalTerms) obj;
+        return Objects.equals(minDocCount, that.minDocCount)
+                && Objects.equals(order, that.order)
+                && Objects.equals(requiredSize, that.requiredSize);
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(minDocCount, order, requiredSize);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
index b1799b52ace..f3339de6738 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * Result of the {@link TermsAggregator} when the field is some kind of whole number like a integer, long, or a date.
@@ -99,6 +100,16 @@ public class LongTerms extends InternalMappedTerms
             builder.endObject();
             return builder;
         }
+
+        @Override
+        public boolean equals(Object obj) {
+            return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(super.hashCode(), term);
+        }
     }
 
     public LongTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators,
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
index 4a40f77b2b2..3fd41dc3aed 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * Result of the {@link TermsAggregator} when the field is a String.
@@ -95,6 +96,16 @@ public class StringTerms extends InternalMappedTerms
     public StringTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators,
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
index 010e24346bf..dba16397fc0 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics;
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -27,7 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
-public abstract class InternalNumericMetricsAggregation extends InternalMetricsAggregation {
+public abstract class InternalNumericMetricsAggregation extends InternalAggregation {
 
     private static final DocValueFormat DEFAULT_FORMAT = DocValueFormat.RAW;
 
@@ -118,7 +119,7 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA
             return false;
         }
         InternalNumericMetricsAggregation other = (InternalNumericMetricsAggregation) obj;
-        return super.equals(obj) && 
+        return super.equals(obj) &&
                 Objects.equals(format, other.format);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
index 5a9df082965..2a3d03e43e6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
@@ -24,25 +24,25 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
-public class InternalGeoBounds extends InternalMetricsAggregation implements GeoBounds {
-    private final double top;
-    private final double bottom;
-    private final double posLeft;
-    private final double posRight;
-    private final double negLeft;
-    private final double negRight;
-    private final boolean wrapLongitude;
+public class InternalGeoBounds extends InternalAggregation implements GeoBounds {
+    final double top;
+    final double bottom;
+    final double posLeft;
+    final double posRight;
+    final double negLeft;
+    final double negRight;
+    final boolean wrapLongitude;
 
     InternalGeoBounds(String name, double top, double bottom, double posLeft, double posRight,
-            double negLeft, double negRight, boolean wrapLongitude,
-            List pipelineAggregators, Map metaData) {
+                      double negLeft, double negRight, boolean wrapLongitude,
+                      List pipelineAggregators, Map metaData) {
         super(name, pipelineAggregators, metaData);
         this.top = top;
         this.bottom = bottom;
@@ -82,7 +82,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo
     public String getWriteableName() {
         return GeoBoundsAggregationBuilder.NAME;
     }
-    
+
     @Override
     public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) {
         double top = Double.NEGATIVE_INFINITY;
@@ -187,21 +187,21 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo
     private static class BoundingBox {
         private final GeoPoint topLeft;
         private final GeoPoint bottomRight;
-        
+
         BoundingBox(GeoPoint topLeft, GeoPoint bottomRight) {
             this.topLeft = topLeft;
             this.bottomRight = bottomRight;
         }
-        
+
         public GeoPoint topLeft() {
             return topLeft;
         }
-        
+
         public GeoPoint bottomRight() {
             return bottomRight;
         }
     }
-    
+
     private BoundingBox resolveBoundingBox() {
         if (Double.isInfinite(top)) {
             return null;
@@ -242,4 +242,19 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo
         }
     }
 
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalGeoBounds other = (InternalGeoBounds) obj;
+        return top == other.top &&
+            bottom == other.bottom &&
+            posLeft == other.posLeft &&
+            posRight == other.posRight &&
+            negLeft == other.negLeft &&
+            negRight == other.negRight &&
+            wrapLongitude == other.wrapLongitude;
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
index 06d9d369029..a5a8058ed28 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
@@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -35,7 +34,7 @@ import java.util.Map;
 /**
  * Serialization and merge logic for {@link GeoCentroidAggregator}.
  */
-public class InternalGeoCentroid extends InternalMetricsAggregation implements GeoCentroid {
+public class InternalGeoCentroid extends InternalAggregation implements GeoCentroid {
     protected final GeoPoint centroid;
     protected final long count;
 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
index 6cb3b626f91..bb8e1ac48d3 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
@@ -27,7 +27,6 @@ import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -37,11 +36,16 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
+public class InternalScriptedMetric extends InternalAggregation implements ScriptedMetric {
     private final Script reduceScript;
-    private final Object aggregation;
+    private final List aggregation;
 
     public InternalScriptedMetric(String name, Object aggregation, Script reduceScript, List pipelineAggregators,
+                                  Map metaData) {
+        this(name, Collections.singletonList(aggregation), reduceScript, pipelineAggregators, metaData);
+    }
+
+    private InternalScriptedMetric(String name, List aggregation, Script reduceScript, List pipelineAggregators,
             Map metaData) {
         super(name, pipelineAggregators, metaData);
         this.aggregation = aggregation;
@@ -54,13 +58,13 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
     public InternalScriptedMetric(StreamInput in) throws IOException {
         super(in);
         reduceScript = in.readOptionalWriteable(Script::new);
-        aggregation = in.readGenericValue();
+        aggregation = Collections.singletonList(in.readGenericValue());
     }
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeOptionalWriteable(reduceScript);
-        out.writeGenericValue(aggregation);
+        out.writeGenericValue(aggregation());
     }
 
     @Override
@@ -70,7 +74,10 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
 
     @Override
     public Object aggregation() {
-        return aggregation;
+        if (aggregation.size() != 1) {
+            throw new IllegalStateException("aggregation was not reduced");
+        }
+        return aggregation.get(0);
     }
 
     @Override
@@ -78,11 +85,11 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
         List aggregationObjects = new ArrayList<>();
         for (InternalAggregation aggregation : aggregations) {
             InternalScriptedMetric mapReduceAggregation = (InternalScriptedMetric) aggregation;
-            aggregationObjects.add(mapReduceAggregation.aggregation());
+            aggregationObjects.addAll(mapReduceAggregation.aggregation);
         }
         InternalScriptedMetric firstAggregation = ((InternalScriptedMetric) aggregations.get(0));
-        Object aggregation;
-        if (firstAggregation.reduceScript != null) {
+        List aggregation;
+        if (firstAggregation.reduceScript != null && reduceContext.isFinalReduce()) {
             Map vars = new HashMap<>();
             vars.put("_aggs", aggregationObjects);
             if (firstAggregation.reduceScript.getParams() != null) {
@@ -91,13 +98,16 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
             CompiledScript compiledScript = reduceContext.scriptService().compile(
                 firstAggregation.reduceScript, ScriptContext.Standard.AGGS);
             ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars);
-            aggregation = script.run();
+            aggregation = Collections.singletonList(script.run());
+        } else if (reduceContext.isFinalReduce()) {
+            aggregation = Collections.singletonList(aggregationObjects);
         } else {
+            // if we are not in a final reduce we have to keep all the aggs from all the incoming ones
+            // until we hit the final reduce phase.
             aggregation = aggregationObjects;
         }
         return new InternalScriptedMetric(firstAggregation.getName(), aggregation, firstAggregation.reduceScript, pipelineAggregators(),
                 getMetaData());
-
     }
 
     @Override
@@ -105,7 +115,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
         if (path.isEmpty()) {
             return this;
         } else if (path.size() == 1 && "value".equals(path.get(0))) {
-            return aggregation;
+            return aggregation();
         } else {
             throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
         }
@@ -113,7 +123,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
 
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
-        return builder.field("value", aggregation);
+        return builder.field("value", aggregation());
     }
 
 }
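A hedged trace of the list-valued state above, with s1 and s2 standing in for two per-shard states: a partial reduce concatenates [s1] and [s2] into [s1, s2] without running the reduce script; the final reduce then either executes the script with _aggs = [s1, s2] or, absent a script, collapses the result into the single-element list that aggregation() expects.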
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
index e060826c24c..08c9292d54e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class InternalStats extends InternalNumericMetricsAggregation.MultiValue implements Stats {
     enum Metrics {
@@ -198,4 +199,18 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue
     protected XContentBuilder otherStatsToXCotent(XContentBuilder builder, Params params) throws IOException {
         return builder;
     }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(count, min, max, sum);
+    }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalStats other = (InternalStats) obj;
+        return count == other.count &&
+            Double.compare(min, other.min) == 0 &&
+            Double.compare(max, other.max) == 0 &&
+            Double.compare(sum, other.sum) == 0;
+    }
 }
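Double.compare is the right comparison for the double-valued fields here because Double.NaN == Double.NaN is false while Double.compare(Double.NaN, Double.NaN) == 0; with plain == an InternalStats containing NaN would never equal itself.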
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
index 499111d5668..d6faf5cbb78 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
@@ -189,8 +189,8 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, sigma, format, pipelineAggregators(),
-                metaData());
+        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d,
+            sigma, format, pipelineAggregators(), metaData());
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
index d848001171c..370399bfbb8 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class InternalExtendedStats extends InternalStats implements ExtendedStats {
     enum Metrics {
@@ -90,6 +91,10 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat
         return super.value(name);
     }
 
+    public double getSigma() {
+        return this.sigma;
+    }
+
     @Override
     public double getSumOfSquares() {
         return sumOfSqrs;
@@ -186,4 +191,17 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat
         }
         return builder;
     }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(super.doHashCode(), sumOfSqrs, sigma);
+    }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalExtendedStats other = (InternalExtendedStats) obj;
+        return super.doEquals(obj) &&
+            Double.compare(sumOfSqrs, other.sumOfSqrs) == 0 &&
+            Double.compare(sigma, other.sigma) == 0;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
index baa8c45e140..1b32e6e9dee 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
@@ -27,11 +27,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
-import org.elasticsearch.search.SearchHit;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -41,7 +40,7 @@ import java.util.Map;
 /**
  * Results of the {@link TopHitsAggregator}.
  */
-public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
+public class InternalTopHits extends InternalAggregation implements TopHits {
     private int from;
     private int size;
     private TopDocs topDocs;
@@ -96,7 +95,18 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
 
     @Override
     public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) {
-        SearchHits[] shardHits = new SearchHits[aggregations.size()];
+        final SearchHits[] shardHits = new SearchHits[aggregations.size()];
+        final int from;
+        final int size;
+        if (reduceContext.isFinalReduce()) {
+            from = this.from;
+            size = this.size;
+        } else {
+            // if we are not in the final reduce we need to keep all possible elements during the merge;
+            // for pagination this means keeping all hits until the final reduce phase.
+            from = 0;
+            size = this.from + this.size;
+        }
 
         final TopDocs reducedTopDocs;
         final TopDocs[] shardDocs;
@@ -106,7 +116,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
             shardDocs = new TopFieldDocs[aggregations.size()];
             for (int i = 0; i < shardDocs.length; i++) {
                 InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                shardDocs[i] = (TopFieldDocs) topHitsAgg.topDocs;
+                shardDocs[i] = topHitsAgg.topDocs;
                 shardHits[i] = topHitsAgg.searchHits;
             }
             reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
@@ -130,7 +140,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
             } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
             hits[i] = shardHits[scoreDoc.shardIndex].getAt(position);
         }
-        return new InternalTopHits(name, from, size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
+        return new InternalTopHits(name, this.from, this.size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
                 reducedTopDocs.getMaxScore()),
                 pipelineAggregators(), getMetaData());
     }
@@ -162,7 +172,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi
             ScoreDoc thisDoc = topDocs.scoreDocs[d];
             ScoreDoc otherDoc = other.topDocs.scoreDocs[d];
             if (thisDoc.doc != otherDoc.doc) return false;
-            if (thisDoc.score != otherDoc.score) return false;
+            if (Double.compare(thisDoc.score, otherDoc.score) != 0) return false;
             if (thisDoc.shardIndex != otherDoc.shardIndex) return false;
             if (thisDoc instanceof FieldDoc) {
                 if (false == (otherDoc instanceof FieldDoc)) return false;
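Worked example for the from/size widening in doReduce above: with from = 10 and size = 5 the final response needs the hits ranked 10..14 globally. A partial reduce that already applied from = 10 could throw away hits that a later merge would rank inside that window, so off the final phase the merge runs with from = 0 and size = 15 (this.from + this.size) and pagination is deferred to the final reduce.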
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index abfbd376714..37d7eb5b027 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -38,11 +38,11 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.search.collapse.CollapseBuilder;
 import org.elasticsearch.search.SearchExtBuilder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
+import org.elasticsearch.search.collapse.CollapseBuilder;
 import org.elasticsearch.search.fetch.StoredFieldsContext;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
@@ -314,6 +314,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
      * From index to start the search from. Defaults to 0.
      */
     public SearchSourceBuilder from(int from) {
+        if (from < 0) {
+            throw new IllegalArgumentException("[from] parameter cannot be negative");
+        }
         this.from = from;
         return this;
     }
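A quick illustration of the new guard:

    new SearchSourceBuilder().from(-1); // now throws IllegalArgumentException up front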
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
index e3a78227d9c..3a3c1cfd66d 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
@@ -21,6 +21,7 @@ package org.elasticsearch.search.fetch.subphase.highlight;
 
 import org.apache.lucene.search.highlight.SimpleFragmenter;
 import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
@@ -32,10 +33,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Order;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.function.BiFunction;
@@ -57,8 +60,10 @@ public abstract class AbstractHighlighterBuilder
+    /**
+     * When using the highlighterType fvh this setting
+     * controls which scanner to use for fragment boundaries, and defaults to "simple".
+     */
+    @SuppressWarnings("unchecked")
+    public HB boundaryScannerType(String boundaryScannerType) {
+        this.boundaryScannerType = BoundaryScannerType.fromString(boundaryScannerType);
+        return (HB) this;
+    }
+
+    /**
+     * When using the highlighterType fvh this setting
+     * controls which scanner to use for fragment boundaries, and defaults to "simple".
+     */
+    @SuppressWarnings("unchecked")
+    public HB boundaryScannerType(BoundaryScannerType boundaryScannerType) {
+        this.boundaryScannerType = boundaryScannerType;
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #boundaryScannerType(String)}
+     */
+    public BoundaryScannerType boundaryScannerType() {
+        return this.boundaryScannerType;
+    }
+
     /**
      * When using the highlighterType fvh this setting
      * controls how far to look for boundary characters, and defaults to 20.
@@ -366,6 +420,25 @@ public abstract class AbstractHighlighterBuilder
+    /**
+     * When using the highlighterType fvh and boundaryScannerType break_iterator, this setting
+     * controls the locale to use by the BreakIterator, defaults to "root".
+     */
+    @SuppressWarnings("unchecked")
+    public HB boundaryScannerLocale(String boundaryScannerLocale) {
+        if (boundaryScannerLocale != null) {
+            this.boundaryScannerLocale = Locale.forLanguageTag(boundaryScannerLocale);
+        }
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #boundaryScannerLocale(String)}
+     */
+    public Locale boundaryScannerLocale() {
+        return this.boundaryScannerLocale;
+    }
+
     /**
      * Allows to set custom options for custom highlighters.
      */
@@ -491,12 +564,18 @@ public abstract class AbstractHighlighterBuilder
         if (options != null && options.size() > 0) {
             builder.field(OPTIONS_FIELD.getPreferredName(), options);
         }
@@ -523,8 +602,10 @@ public abstract class AbstractHighlighterBuilder
         parser.declareString((hb, bc) -> hb.boundaryChars(bc.toCharArray()), BOUNDARY_CHARS_FIELD);
+        parser.declareString(HB::boundaryScannerLocale, BOUNDARY_SCANNER_LOCALE_FIELD);
         parser.declareString(HB::highlighterType, TYPE_FIELD);
         parser.declareString(HB::fragmenter, FRAGMENTER_FIELD);
         parser.declareInt(HB::noMatchSize, NO_MATCH_SIZE_FIELD);
@@ -562,8 +643,8 @@ public abstract class AbstractHighlighterBuilder
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java
     public static final Setting SETTING_TV_HIGHLIGHT_MULTI_VALUE = Setting.boolSetting("search.highlight.term_vector_multi_value",
         true, Setting.Property.NodeScope);
@@ -105,12 +114,7 @@ public class FastVectorHighlighter implements Highlighter {
                 FragListBuilder fragListBuilder;
                 BaseFragmentsBuilder fragmentsBuilder;
 
-                BoundaryScanner boundaryScanner = DEFAULT_BOUNDARY_SCANNER;
-                if (field.fieldOptions().boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN
-                        || field.fieldOptions().boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) {
-                    boundaryScanner = new SimpleBoundaryScanner(field.fieldOptions().boundaryMaxScan(),
-                            field.fieldOptions().boundaryChars());
-                }
+                final BoundaryScanner boundaryScanner = getBoundaryScanner(field);
                 boolean forceSource = context.highlight().forceSource(field);
                 if (field.fieldOptions().numberOfFragments() == 0) {
                     fragListBuilder = new SingleFragListBuilder();
@@ -206,6 +210,29 @@ public class FastVectorHighlighter implements Highlighter {
                 && fieldMapper.fieldType().storeTermVectorPositions();
     }
 
+    private static BoundaryScanner getBoundaryScanner(Field field) {
+        final FieldOptions fieldOptions = field.fieldOptions();
+        final Locale boundaryScannerLocale = fieldOptions.boundaryScannerLocale();
+        switch(fieldOptions.boundaryScannerType()) {
+        case SENTENCE:
+            if (boundaryScannerLocale != null) {
+                return new BreakIteratorBoundaryScanner(BreakIterator.getSentenceInstance(boundaryScannerLocale));
+            }
+            return DEFAULT_SENTENCE_BOUNDARY_SCANNER;
+        case WORD:
+            if (boundaryScannerLocale != null) {
+                return new BreakIteratorBoundaryScanner(BreakIterator.getWordInstance(boundaryScannerLocale));
+            }
+            return DEFAULT_WORD_BOUNDARY_SCANNER;
+        default:
+            if (fieldOptions.boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN
+                    || fieldOptions.boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) {
+                return new SimpleBoundaryScanner(fieldOptions.boundaryMaxScan(), fieldOptions.boundaryChars());
+            }
+            return DEFAULT_SIMPLE_BOUNDARY_SCANNER;
+        }
+    }
+
     private class MapperHighlightEntry {
         public FragListBuilder fragListBuilder;
         public FragmentsBuilder fragmentsBuilder;
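A minimal sketch, not part of this patch, of how the new scanner selection is expected to surface through the Java API; the field name "body" is hypothetical:

    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType;

    public class BoundaryScannerExample {
        public static HighlightBuilder sentenceHighlight() {
            // boundary scanners only apply to the fvh highlighter; SENTENCE and WORD
            // route through BreakIteratorBoundaryScanner, anything else stays on the
            // SimpleBoundaryScanner path shown in getBoundaryScanner(Field) above
            return new HighlightBuilder()
                .field("body")                                     // hypothetical field name
                .highlighterType("fvh")
                .boundaryScannerType(BoundaryScannerType.SENTENCE)
                .boundaryScannerLocale("de-DE");                   // parsed via Locale.forLanguageTag
        }
    }
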
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
index a063b2900d5..45b8c612a76 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
@@ -95,9 +95,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilder>
     private final List<Field> fields = new ArrayList<>();
 
@@ -327,12 +327,18 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilder>
+    public enum BoundaryScannerType implements Writeable {
+        CHARS, WORD, SENTENCE;
+
+        public static BoundaryScannerType readFromStream(StreamInput in) throws IOException {
+            int ordinal = in.readVInt();
+            if (ordinal < 0 || ordinal >= values().length) {
+                throw new IOException("Unknown BoundaryScannerType ordinal [" + ordinal + "]");
+            }
+            return values()[ordinal];
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeVInt(this.ordinal());
+        }
+
+        public static BoundaryScannerType fromString(String boundaryScannerType) {
+            return valueOf(boundaryScannerType.toUpperCase(Locale.ROOT));
+        }
+
+        @Override
+        public String toString() {
+            return name().toLowerCase(Locale.ROOT);
+        }
+    }
 }
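The ordinal-based wire format above can be sanity-checked with a stream round-trip; a sketch under the usual BytesStreamOutput/StreamInput test pattern, not part of this patch:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType;

    public class BoundaryScannerTypeWireCheck {
        public static void main(String[] args) throws Exception {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                BoundaryScannerType.WORD.writeTo(out);            // writes the vint ordinal
                try (StreamInput in = out.bytes().streamInput()) {
                    // readFromStream rejects out-of-range ordinals with an IOException
                    assert BoundaryScannerType.readFromStream(in) == BoundaryScannerType.WORD;
                }
            }
        }
    }
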
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java
index d4731718793..2baf73ab5fa 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java
@@ -20,11 +20,13 @@
 package org.elasticsearch.search.fetch.subphase.highlight;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType;
 
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 
@@ -110,10 +112,14 @@ public class SearchContextHighlight {
 
         private String fragmenter;
 
+        private BoundaryScannerType boundaryScannerType;
+
         private int boundaryMaxScan = -1;
 
         private Character[] boundaryChars = null;
 
+        private Locale boundaryScannerLocale;
+
         private Query highlightQuery;
 
         private int noMatchSize = -1;
@@ -168,6 +174,10 @@ public class SearchContextHighlight {
             return fragmenter;
         }
 
+        public BoundaryScannerType boundaryScannerType() {
+            return boundaryScannerType;
+        }
+
         public int boundaryMaxScan() {
             return boundaryMaxScan;
         }
@@ -176,6 +186,10 @@ public class SearchContextHighlight {
             return boundaryChars;
         }
 
+        public Locale boundaryScannerLocale() {
+            return boundaryScannerLocale;
+        }
+
         public Query highlightQuery() {
             return highlightQuery;
         }
@@ -260,6 +274,11 @@ public class SearchContextHighlight {
                 return this;
             }
 
+            Builder boundaryScannerType(BoundaryScannerType boundaryScanner) {
+                fieldOptions.boundaryScannerType = boundaryScanner;
+                return this;
+            }
+
             Builder boundaryMaxScan(int boundaryMaxScan) {
                 fieldOptions.boundaryMaxScan = boundaryMaxScan;
                 return this;
@@ -270,6 +289,11 @@ public class SearchContextHighlight {
                 return this;
             }
 
+            Builder boundaryScannerLocale(Locale boundaryScannerLocale) {
+                fieldOptions.boundaryScannerLocale = boundaryScannerLocale;
+                return this;
+            }
+
             Builder highlightQuery(Query highlightQuery) {
                 fieldOptions.highlightQuery = highlightQuery;
                 return this;
@@ -324,12 +348,18 @@ public class SearchContextHighlight {
                 if (fieldOptions.requireFieldMatch == null) {
                     fieldOptions.requireFieldMatch = globalOptions.requireFieldMatch;
                 }
+                if (fieldOptions.boundaryScannerType == null) {
+                    fieldOptions.boundaryScannerType = globalOptions.boundaryScannerType;
+                }
                 if (fieldOptions.boundaryMaxScan == -1) {
                     fieldOptions.boundaryMaxScan = globalOptions.boundaryMaxScan;
                 }
                 if (fieldOptions.boundaryChars == null && globalOptions.boundaryChars != null) {
                     fieldOptions.boundaryChars = Arrays.copyOf(globalOptions.boundaryChars, globalOptions.boundaryChars.length);
                 }
+                if (fieldOptions.boundaryScannerLocale == null) {
+                    fieldOptions.boundaryScannerLocale = globalOptions.boundaryScannerLocale;
+                }
                 if (fieldOptions.highlighterType == null) {
                     fieldOptions.highlighterType = globalOptions.highlighterType;
                 }
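The merge above means unset per-field values inherit the global highlighter options while explicit per-field values win; a sketch of the resulting builder-level semantics (field names hypothetical, not part of this patch):

    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType;

    public class HighlightInheritanceExample {
        public static HighlightBuilder build() {
            return new HighlightBuilder()
                .boundaryScannerType(BoundaryScannerType.WORD)           // global setting
                .field(new HighlightBuilder.Field("title"))              // inherits WORD via the merge above
                .field(new HighlightBuilder.Field("body")
                    .boundaryScannerType(BoundaryScannerType.SENTENCE)); // per-field override wins
        }
    }
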
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
index d396d14e983..391f6efe18b 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -38,7 +38,7 @@ import java.util.Map;
 public class InternalSearchResponse implements Streamable, ToXContent {
 
     public static InternalSearchResponse empty() {
-        return new InternalSearchResponse(SearchHits.empty(), null, null, null, false, null);
+        return new InternalSearchResponse(SearchHits.empty(), null, null, null, false, null, 1);
     }
 
     private SearchHits hits;
@@ -53,17 +53,21 @@ public class InternalSearchResponse implements Streamable, ToXContent {
 
     private Boolean terminatedEarly = null;
 
+    private int numReducePhases = 1;
+
     private InternalSearchResponse() {
     }
 
     public InternalSearchResponse(SearchHits hits, InternalAggregations aggregations, Suggest suggest,
-                                  SearchProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) {
+                                  SearchProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly,
+                                  int numReducePhases) {
         this.hits = hits;
         this.aggregations = aggregations;
         this.suggest = suggest;
         this.profileResults = profileResults;
         this.timedOut = timedOut;
         this.terminatedEarly = terminatedEarly;
+        this.numReducePhases = numReducePhases;
     }
 
     public boolean timedOut() {
@@ -86,6 +90,13 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         return suggest;
     }
 
+    /**
+     * Returns the number of reduce phases applied to obtain this search response
+     */
+    public int getNumReducePhases() {
+        return numReducePhases;
+    }
+
     /**
      * Returns the profile results for this search response (including all shards).
      * An empty map is returned if profiling was not enabled
@@ -132,6 +143,7 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         timedOut = in.readBoolean();
         terminatedEarly = in.readOptionalBoolean();
         profileResults = in.readOptionalWriteable(SearchProfileShardResults::new);
+        numReducePhases = in.readVInt();
     }
 
     @Override
@@ -152,5 +164,6 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         out.writeBoolean(timedOut);
         out.writeOptionalBoolean(terminatedEarly);
         out.writeOptionalWriteable(profileResults);
+        out.writeVInt(numReducePhases);
     }
 }
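A sketch of the new counter's default behaviour, mirroring the empty() factory above; not part of this patch, and the SearchHits import path is assumed:

    import org.elasticsearch.search.SearchHits;
    import org.elasticsearch.search.internal.InternalSearchResponse;

    public class NumReducePhasesExample {
        public static void main(String[] args) {
            // a response produced by a single reduce phase carries the default of 1,
            // matching the trailing constructor argument added above
            InternalSearchResponse response = new InternalSearchResponse(
                SearchHits.empty(), null, null, null, false, null, 1);
            assert response.getNumReducePhases() == 1;
        }
    }
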
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java
index f1875564f85..ba5ad712f41 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java
@@ -20,11 +20,13 @@ package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -46,17 +48,19 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
 
 /**
  * Top level suggest result, containing the result for each suggestion.
  */
public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>, Streamable, ToXContent {
 
-    private static final String NAME = "suggest";
+    static final String NAME = "suggest";
 
     public static final Comparator