Merge branch 'master' into hlclient/add-delete-method
# Conflicts:
#	client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
#	client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
#	client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java
#	client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
commit b680e62831
@@ -1,5 +1,5 @@
 Elasticsearch
-Copyright 2009-2016 Elasticsearch
+Copyright 2009-2017 Elasticsearch
 
 This product includes software developed by The Apache Software
 Foundation (http://www.apache.org/).
@@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
   # debian and it works fine.
   config.vm.define "debian-8" do |config|
     config.vm.box = "elastic/debian-8-x86_64"
-    deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
+    deb_common config
   end
   config.vm.define "centos-6" do |config|
     config.vm.box = "elastic/centos-6-x86_64"
@@ -114,10 +114,10 @@ SOURCE_PROMPT
 end
 
 def ubuntu_common(config, extra: '')
-  deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*', extra: extra
+  deb_common config, extra: extra
 end
 
-def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '')
+def deb_common(config, extra: '')
   # http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
   config.vm.provision "fix-no-tty", type: "shell" do |s|
     s.privileged = false
@@ -127,24 +127,14 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '')
     update_command: "apt-get update",
     update_tracking_file: "/var/cache/apt/archives/last_update",
     install_command: "apt-get install -y",
-    java_package: "openjdk-8-jdk",
-    extra: <<-SHELL
-      export DEBIAN_FRONTEND=noninteractive
-      ls /etc/apt/sources.list.d/#{openjdk_list}.list > /dev/null 2>&1 ||
-        (echo "==> Importing java-8 ppa" &&
-          #{add_openjdk_repository_command} &&
-          apt-get update)
-      #{extra}
-    SHELL
-  )
+    extra: extra)
 end
 
 def rpm_common(config)
   provision(config,
     update_command: "yum check-update",
     update_tracking_file: "/var/cache/yum/last_update",
-    install_command: "yum install -y",
-    java_package: "java-1.8.0-openjdk-devel")
+    install_command: "yum install -y")
 end
 
 def dnf_common(config)
@@ -152,8 +142,7 @@ def dnf_common(config)
     update_command: "dnf check-update",
     update_tracking_file: "/var/cache/dnf/last_update",
     install_command: "dnf install -y",
-    install_command_retries: 5,
-    java_package: "java-1.8.0-openjdk-devel")
+    install_command_retries: 5)
   if Vagrant.has_plugin?("vagrant-cachier")
     # Autodetect doesn't work....
     config.cache.auto_detect = false
@@ -170,7 +159,6 @@ def suse_common(config, extra)
     update_command: "zypper --non-interactive list-updates",
     update_tracking_file: "/var/cache/zypp/packages/last_update",
     install_command: "zypper --non-interactive --quiet install --no-recommends",
-    java_package: "java-1_8_0-openjdk-devel",
     extra: extra)
 end
 
@@ -193,7 +181,6 @@ end
 # is cached by vagrant-cachier.
 # @param install_command [String] The command used to install a package.
 #   Required. Think `apt-get install #{package}`.
-# @param java_package [String] The name of the java package. Required.
 # @param extra [String] Extra provisioning commands run before anything else.
 #   Optional. Used for things like setting up the ppa for Java 8.
 def provision(config,
@@ -201,13 +188,11 @@ def provision(config,
     update_tracking_file: 'required',
     install_command: 'required',
     install_command_retries: 0,
-    java_package: 'required',
     extra: '')
   # Vagrant run ruby 2.0.0 which doesn't have required named parameters....
   raise ArgumentError.new('update_command is required') if update_command == 'required'
   raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
   raise ArgumentError.new('install_command is required') if install_command == 'required'
-  raise ArgumentError.new('java_package is required') if java_package == 'required'
   config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
     set -e
     set -o pipefail
@@ -254,7 +239,10 @@ def provision(config,
 
     #{extra}
 
-    installed java || install #{java_package}
+    installed java || {
+      echo "==> Java is not installed on vagrant box ${config.vm.box}"
+      return 1
+    }
     ensure tar
     ensure curl
     ensure unzip
@@ -37,10 +37,7 @@ apply plugin: 'application'
 archivesBaseName = 'elasticsearch-benchmarks'
 mainClassName = 'org.openjdk.jmh.Main'
 
-// never try to invoke tests on the benchmark project - there aren't any
-check.dependsOn.remove(test)
-// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
-task test(type: Test, overwrite: true)
+test.enabled = false
 
 dependencies {
     compile("org.elasticsearch:elasticsearch:${version}") {
@@ -59,7 +56,6 @@ compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
 // enable the JMH's BenchmarkProcessor to generate the final benchmark classes
 // needs to be added separately otherwise Gradle will quote it and javac will fail
 compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
-compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
 
 forbiddenApis {
     // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle
+
+import org.gradle.api.DefaultTask
+import org.gradle.api.Project
+import org.gradle.api.artifacts.Configuration
+import org.gradle.api.tasks.OutputFile
+import org.gradle.api.tasks.TaskAction
+
+/**
+ * A task to create a notice file which includes dependencies' notices.
+ */
+public class NoticeTask extends DefaultTask {
+
+    @OutputFile
+    File noticeFile = new File(project.buildDir, "notices/${name}/NOTICE.txt")
+
+    /** Configurations to inspect dependencies */
+    private List<Project> dependencies = new ArrayList<>()
+
+    public NoticeTask() {
+        description = 'Create a notice file from dependencies'
+    }
+
+    /** Add notices from licenses found in the given project. */
+    public void dependencies(Project project) {
+        dependencies.add(project)
+    }
+
+    @TaskAction
+    public void generateNotice() {
+        StringBuilder output = new StringBuilder()
+        output.append(project.rootProject.file('NOTICE.txt').getText('UTF-8'))
+        output.append('\n\n')
+        Set<String> seen = new HashSet<>()
+        for (Project dep : dependencies) {
+            File licensesDir = new File(dep.projectDir, 'licenses')
+            if (licensesDir.exists() == false) continue
+            licensesDir.eachFileMatch({ it ==~ /.*-NOTICE\.txt/ && seen.contains(it) == false }) { File file ->
+                String name = file.name.substring(0, file.name.length() - '-NOTICE.txt'.length())
+                appendFile(file, name, 'NOTICE', output)
+                appendFile(new File(file.parentFile, "${name}-LICENSE.txt"), name, 'LICENSE', output)
+                seen.add(file.name)
+            }
+        }
+        noticeFile.setText(output.toString(), 'UTF-8')
+    }
+
+    static void appendFile(File file, String name, String type, StringBuilder output) {
+        String text = file.getText('UTF-8')
+        if (text.trim().isEmpty()) {
+            return
+        }
+        output.append('================================================================================\n')
+        output.append("${name} ${type}\n")
+        output.append('================================================================================\n')
+        output.append(text)
+        output.append('\n\n')
+    }
+}
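Note: for orientation, a minimal Java sketch (not part of the commit) of the section framing generateNotice() appends for each dependency notice it finds under licenses/: an 80-column rule, "<name> <TYPE>", another rule, then the file text. The dependency name "httpcore" below is hypothetical.

    // Hypothetical illustration mirroring appendFile() above; plain Java, runnable standalone.
    public class NoticeBannerSketch {
        static String section(String name, String type, String text) {
            char[] bar = new char[80];
            java.util.Arrays.fill(bar, '=');
            String rule = new String(bar);
            // rule, "<name> <TYPE>", rule, the notice text, then a blank line
            return rule + "\n" + name + " " + type + "\n" + rule + "\n" + text + "\n\n";
        }

        public static void main(String[] args) {
            System.out.print(section("httpcore", "NOTICE", "Apache HttpComponents Core\n..."));
        }
    }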
@@ -19,6 +19,7 @@
 package org.elasticsearch.gradle.plugin
 
 import org.elasticsearch.gradle.BuildPlugin
+import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.test.RestIntegTestTask
 import org.elasticsearch.gradle.test.RunTask
 import org.gradle.api.Project
@@ -71,6 +72,7 @@ public class PluginBuildPlugin extends BuildPlugin {
                 project.integTest.clusterConfig.plugin(project.path)
                 project.tasks.run.clusterConfig.plugin(project.path)
                 addZipPomGeneration(project)
+                addNoticeGeneration(project)
             }
 
             project.namingConventions {
@@ -118,12 +120,15 @@ public class PluginBuildPlugin extends BuildPlugin {
         // add the plugin properties and metadata to test resources, so unit tests can
         // know about the plugin (used by test security code to statically initialize the plugin in unit tests)
         SourceSet testSourceSet = project.sourceSets.test
-        testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
+        testSourceSet.output.dir(buildProperties.descriptorOutput.parentFile, builtBy: 'pluginProperties')
         testSourceSet.resources.srcDir(pluginMetadata)
 
         // create the actual bundle task, which zips up all the files for the plugin
         Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) {
-            from buildProperties // plugin properties file
+            from(buildProperties.descriptorOutput.parentFile) {
+                // plugin properties file
+                include(buildProperties.descriptorOutput.name)
+            }
             from pluginMetadata // metadata (eg custom security policy)
             from project.jar // this plugin's jar
             from project.configurations.runtime - project.configurations.provided // the dep jars
@@ -244,4 +249,19 @@ public class PluginBuildPlugin extends BuildPlugin {
             }
         }
     }
+
+    protected void addNoticeGeneration(Project project) {
+        File licenseFile = project.pluginProperties.extension.licenseFile
+        if (licenseFile != null) {
+            project.bundlePlugin.from(licenseFile.parentFile) {
+                include(licenseFile.name)
+            }
+        }
+        File noticeFile = project.pluginProperties.extension.noticeFile
+        if (noticeFile != null) {
+            NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class)
+            generateNotice.dependencies(project)
+            project.bundlePlugin.from(generateNotice)
+        }
+    }
 }
@@ -43,6 +43,17 @@ class PluginPropertiesExtension {
     @Input
     boolean hasClientJar = false
 
+    /** A license file that should be included in the built plugin zip. */
+    @Input
+    File licenseFile = null
+
+    /**
+     * A notice file that should be included in the built plugin zip. This will be
+     * extended with notices from the {@code licenses/} directory.
+     */
+    @Input
+    File noticeFile = null
+
     PluginPropertiesExtension(Project project) {
         name = project.name
         version = project.version
@@ -22,6 +22,7 @@ import org.elasticsearch.gradle.VersionProperties
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.Task
 import org.gradle.api.tasks.Copy
+import org.gradle.api.tasks.OutputFile
 
 /**
  * Creates a plugin descriptor.
@@ -29,20 +30,22 @@ import org.gradle.api.tasks.Copy
 class PluginPropertiesTask extends Copy {
 
     PluginPropertiesExtension extension
-    File generatedResourcesDir = new File(project.buildDir, 'generated-resources')
+
+    @OutputFile
+    File descriptorOutput = new File(project.buildDir, 'generated-resources/plugin-descriptor.properties')
 
     PluginPropertiesTask() {
-        File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')
+        File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}")
         Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') {
             doLast {
-                InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream('/plugin-descriptor.properties')
+                InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}")
                 templateFile.parentFile.mkdirs()
                 templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8')
             }
         }
 
         dependsOn(copyPluginPropertiesTemplate)
         extension = project.extensions.create('esplugin', PluginPropertiesExtension, project)
-        project.clean.delete(generatedResourcesDir)
         project.afterEvaluate {
             // check require properties are set
             if (extension.name == null) {
@ -55,8 +58,8 @@ class PluginPropertiesTask extends Copy {
|
|||
throw new InvalidUserDataException('classname is a required setting for esplugin')
|
||||
}
|
||||
// configure property substitution
|
||||
from(templateFile)
|
||||
into(generatedResourcesDir)
|
||||
from(templateFile.parentFile).include(descriptorOutput.name)
|
||||
into(descriptorOutput.parentFile)
|
||||
Map<String, String> properties = generateSubstitutions()
|
||||
expand(properties)
|
||||
inputs.properties(properties)
|
||||
|
|
|
@@ -91,6 +91,7 @@ class PrecommitTasks {
         if (testForbidden != null) {
             testForbidden.configure {
                 signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
+                signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt')
             }
         }
         Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@@ -51,22 +51,18 @@ class ClusterFormationTasks {
      *
      * Returns a list of NodeInfo objects for each node in the cluster.
      */
-    static List<NodeInfo> setup(Project project, Task task, ClusterConfiguration config) {
-        if (task.getEnabled() == false) {
-            // no need to add cluster formation tasks if the task won't run!
-            return
-        }
+    static List<NodeInfo> setup(Project project, String prefix, Task runner, ClusterConfiguration config) {
         File sharedDir = new File(project.buildDir, "cluster/shared")
         // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
         // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
        // such that snapshots survive failures / test runs and there is no simple way today to fix that.
-        Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
+        Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) {
             delete sharedDir
             doLast {
                 sharedDir.mkdirs()
             }
         }
-        List<Task> startTasks = [cleanup]
+        List<Task> startTasks = []
         List<NodeInfo> nodes = []
         if (config.numNodes < config.numBwcNodes) {
             throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
@@ -75,7 +71,7 @@ class ClusterFormationTasks {
             throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
         }
         // this is our current version distribution configuration we use for all kinds of REST tests etc.
-        String distroConfigName = "${task.name}_elasticsearchDistro"
+        String distroConfigName = "${prefix}_elasticsearchDistro"
         Configuration currentDistro = project.configurations.create(distroConfigName)
         configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
         if (config.bwcVersion != null && config.numBwcNodes > 0) {
@@ -89,7 +85,7 @@ class ClusterFormationTasks {
         }
         configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
         for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
-            configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(),
+            configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(),
                     project.configurations.elasticsearchBwcPlugins, config.bwcVersion)
         }
         project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
@@ -104,13 +100,14 @@ class ClusterFormationTasks {
                 elasticsearchVersion = config.bwcVersion
                 distro = project.configurations.elasticsearchBwcDistro
             }
-            NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
+            NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
             nodes.add(node)
-            startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0)))
+            Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0)
+            startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0)))
         }
 
-        Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
-        task.dependsOn(wait)
+        Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks)
+        runner.dependsOn(wait)
 
         return nodes
     }
@@ -150,58 +147,58 @@ class ClusterFormationTasks {
      *
      * @return a task which starts the node.
      */
-    static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
+    static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
 
         // tasks are chained so their execution order is maintained
-        Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
+        Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
             delete node.homeDir
             delete node.cwd
             doLast {
                 node.cwd.mkdirs()
             }
         }
-        setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
-        setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
-        setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
-        setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
+        setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
+        setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
+        setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration)
+        setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
         if (node.config.plugins.isEmpty() == false) {
             if (node.nodeVersion == VersionProperties.elasticsearch) {
-                setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
+                setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node)
             } else {
-                setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node)
+                setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node)
             }
         }
 
         // install modules
         for (Project module : node.config.modules) {
             String actionName = pluginTaskName('install', module.name, 'Module')
-            setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
+            setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module)
         }
 
         // install plugins
         for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
             String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
-            setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
+            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue())
         }
 
         // sets up any extra config files that need to be copied over to the ES instance;
         // its run after plugins have been installed, as the extra config files may belong to plugins
-        setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
+        setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node)
 
         // extra setup commands
         for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
             // the first argument is the actual script name, relative to home
             Object[] args = command.getValue().clone()
             args[0] = new File(node.homeDir, args[0].toString())
-            setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args)
+            setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args)
         }
 
-        Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
+        Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node)
 
         if (node.config.daemonize) {
-            Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
+            Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node)
             // if we are running in the background, make sure to stop the server when the task completes
-            task.finalizedBy(stop)
+            runner.finalizedBy(stop)
             start.finalizedBy(stop)
         }
         return start
@@ -648,11 +645,11 @@ class ClusterFormationTasks {
     }
 
     /** Returns a unique task name for this task and node configuration */
-    static String taskName(Task parentTask, NodeInfo node, String action) {
+    static String taskName(String prefix, NodeInfo node, String action) {
         if (node.config.numNodes > 1) {
-            return "${parentTask.name}#node${node.nodeNum}.${action}"
+            return "${prefix}#node${node.nodeNum}.${action}"
        } else {
-            return "${parentTask.name}#${action}"
+            return "${prefix}#${action}"
         }
     }
 
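Note: a hedged sketch of the names this now produces once setup() receives a string prefix such as "integTestCluster" (the "${name}Cluster" prefix passed from RestIntegTestTask further down). Plain Java, for illustration only; the method below is a standalone rendering, not the Groovy above.

    // Rendering of taskName()'s naming scheme with a string prefix.
    static String taskName(String prefix, int numNodes, int nodeNum, String action) {
        return numNodes > 1
                ? prefix + "#node" + nodeNum + "." + action   // e.g. integTestCluster#node0.start
                : prefix + "#" + action;                      // e.g. integTestCluster#start
    }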
@@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test
 import org.apache.tools.ant.taskdefs.condition.Os
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.Project
-import org.gradle.api.Task
 
 /**
  * A container for the files and configuration associated with a single node in a test cluster.
@@ -96,17 +95,17 @@ class NodeInfo {
     /** the version of elasticsearch that this node runs */
     String nodeVersion
 
-    /** Creates a node to run as part of a cluster for the given task */
-    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
+    /** Holds node configuration for part of a test cluster. */
+    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
         this.config = config
         this.nodeNum = nodeNum
         this.sharedDir = sharedDir
         if (config.clusterName != null) {
             clusterName = config.clusterName
         } else {
-            clusterName = "${task.path.replace(':', '_').substring(1)}"
+            clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
         }
-        baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
+        baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
         pidFile = new File(baseDir, 'es.pid')
         this.nodeVersion = nodeVersion
         homeDir = homeDir(baseDir, config.distribution, nodeVersion)
@@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test
 
 import com.carrotsearch.gradle.junit4.RandomizedTestingTask
 import org.elasticsearch.gradle.BuildPlugin
+import org.gradle.api.DefaultTask
 import org.gradle.api.Task
 import org.gradle.api.internal.tasks.options.Option
 import org.gradle.api.plugins.JavaBasePlugin
@@ -27,12 +28,15 @@ import org.gradle.api.tasks.Input
 import org.gradle.util.ConfigureUtil
 
 /**
- * Runs integration tests, but first starts an ES cluster,
- * and passes the ES cluster info as parameters to the tests.
+ * A wrapper task around setting up a cluster and running rest tests.
  */
-public class RestIntegTestTask extends RandomizedTestingTask {
+public class RestIntegTestTask extends DefaultTask {
 
-    ClusterConfiguration clusterConfig
+    protected ClusterConfiguration clusterConfig
+
+    protected RandomizedTestingTask runner
+
+    protected Task clusterInit
 
     /** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
     List<NodeInfo> nodes
@@ -44,35 +48,44 @@ public class RestIntegTestTask extends RandomizedTestingTask {
     public RestIntegTestTask() {
         description = 'Runs rest tests against an elasticsearch cluster.'
         group = JavaBasePlugin.VERIFICATION_GROUP
-        dependsOn(project.testClasses)
-        classpath = project.sourceSets.test.runtimeClasspath
-        testClassesDir = project.sourceSets.test.output.classesDir
-        clusterConfig = new ClusterConfiguration(project)
+        runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class)
+        super.dependsOn(runner)
+        clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
+        runner.dependsOn(clusterInit)
+        runner.classpath = project.sourceSets.test.runtimeClasspath
+        runner.testClassesDir = project.sourceSets.test.output.classesDir
+        clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project)
 
         // start with the common test configuration
-        configure(BuildPlugin.commonTestConfig(project))
+        runner.configure(BuildPlugin.commonTestConfig(project))
         // override/add more for rest tests
-        parallelism = '1'
-        include('**/*IT.class')
-        systemProperty('tests.rest.load_packaged', 'false')
+        runner.parallelism = '1'
+        runner.include('**/*IT.class')
+        runner.systemProperty('tests.rest.load_packaged', 'false')
         // we pass all nodes to the rest cluster to allow the clients to round-robin between them
         // this is more realistic than just talking to a single node
-        systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
-        systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
+        runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
+        runner.systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
         // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
         // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
         // both as separate sysprops
-        systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
+        runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
 
         // copy the rest spec/tests into the test resources
         RestSpecHack.configureDependencies(project)
         project.afterEvaluate {
-            dependsOn(RestSpecHack.configureTask(project, includePackaged))
+            runner.dependsOn(RestSpecHack.configureTask(project, includePackaged))
         }
         // this must run after all projects have been configured, so we know any project
         // references can be accessed as a fully configured
         project.gradle.projectsEvaluated {
-            nodes = ClusterFormationTasks.setup(project, this, clusterConfig)
+            if (enabled == false) {
+                runner.enabled = false
+                clusterInit.enabled = false
+                return // no need to add cluster formation tasks if the task won't run!
+            }
+            nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
+            super.dependsOn(runner.finalizedBy)
         }
     }
 
@@ -84,25 +97,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         clusterConfig.debug = enabled;
     }
 
-    @Input
-    public void cluster(Closure closure) {
-        ConfigureUtil.configure(closure, clusterConfig)
-    }
-
-    public ClusterConfiguration getCluster() {
-        return clusterConfig
-    }
-
     public List<NodeInfo> getNodes() {
         return nodes
     }
 
     @Override
     public Task dependsOn(Object... dependencies) {
-        super.dependsOn(dependencies)
+        runner.dependsOn(dependencies)
         for (Object dependency : dependencies) {
             if (dependency instanceof Fixture) {
-                finalizedBy(((Fixture)dependency).stopTask)
+                runner.finalizedBy(((Fixture)dependency).stopTask)
             }
         }
         return this
@@ -110,11 +114,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
 
     @Override
     public void setDependsOn(Iterable<?> dependencies) {
-        super.setDependsOn(dependencies)
+        runner.setDependsOn(dependencies)
         for (Object dependency : dependencies) {
             if (dependency instanceof Fixture) {
-                finalizedBy(((Fixture)dependency).stopTask)
+                runner.finalizedBy(((Fixture)dependency).stopTask)
             }
         }
     }
+
+    @Override
+    public Task mustRunAfter(Object... tasks) {
+        clusterInit.mustRunAfter(tasks)
+    }
 }
@@ -43,7 +43,7 @@ public class RestTestPlugin implements Plugin<Project> {
         }
 
         RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
-        integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
+        integTest.clusterConfig.distribution = 'zip' // rest tests should run with the real zip
         integTest.mustRunAfter(project.precommit)
         project.check.dependsOn(integTest)
     }
@@ -18,7 +18,7 @@ public class RunTask extends DefaultTask {
         clusterConfig.daemonize = false
         clusterConfig.distribution = 'zip'
         project.afterEvaluate {
-            ClusterFormationTasks.setup(project, this, clusterConfig)
+            ClusterFormationTasks.setup(project, name, this, clusterConfig)
         }
     }
 
@@ -16,10 +16,8 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> BOXES = [
             'centos-6',
             'centos-7',
-            // TODO: re-enable debian once it does not have broken openjdk packages
-            //'debian-8',
-            // TODO: re-enable fedora once it does not have broken openjdk packages
-            //'fedora-24',
+            'debian-8',
+            'fedora-24',
             'oel-6',
             'oel-7',
             'opensuse-13',
@@ -157,7 +157,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineTransportAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchResponse.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
@@ -452,8 +451,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />
@@ -0,0 +1,45 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+@defaultMessage Explicitly specify the ContentType of HTTP entities when creating
+org.apache.http.entity.StringEntity#<init>(java.lang.String)
+org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String)
+org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset)
+org.apache.http.entity.ByteArrayEntity#<init>(byte[])
+org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int)
+org.apache.http.entity.FileEntity#<init>(java.io.File)
+org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream)
+org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long)
+org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[])
+org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int)
+org.apache.http.nio.entity.NFileEntity#<init>(java.io.File)
+org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String)
+org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String)
+
+@defaultMessage Use non-deprecated constructors
+org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String)
+org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean)
+org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String)
+org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String)
+
+@defaultMessage BasicEntity is easy to mess up and forget to set content type
+org.apache.http.entity.BasicHttpEntity#<init>()
+
+@defaultMessage EntityTemplate is easy to mess up and forget to set content type
+org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer)
+
+@defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type
+org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable)
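Note: these signatures push callers toward the entity constructors that take an explicit ContentType. A minimal Java sketch of the allowed pattern (not part of the commit; the JSON body is a placeholder):

    import org.apache.http.entity.ContentType;
    import org.apache.http.nio.entity.NStringEntity;

    // Forbidden: new NStringEntity("{}") -- the content type would default silently.
    // Allowed: the ContentType is stated at construction time.
    NStringEntity entity = new NStringEntity("{\"query\":{\"match_all\":{}}}", ContentType.APPLICATION_JSON);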
@@ -53,6 +53,6 @@ public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
             new SearchHit[0], 0L, 0.0f),
             new InternalAggregations(Collections.emptyList()),
             new Suggest(Collections.emptyList()),
-            new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
+            new SearchProfileShardResults(Collections.emptyMap()), false, false, 1), "", 1, 1, 0, new ShardSearchFailure[0]));
     }
 }
@@ -1,3 +1,5 @@
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -39,3 +41,9 @@ dependencyLicenses {
         it.group.startsWith('org.elasticsearch') == false
     }
 }
+
+forbiddenApisMain {
+    // core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already
+    // specified
+    signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
+}
@@ -30,16 +30,27 @@ import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
@@ -86,6 +97,127 @@ final class Request {
         return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
     }
 
+    static Request bulk(BulkRequest bulkRequest) throws IOException {
+        Params parameters = Params.builder();
+        parameters.withTimeout(bulkRequest.timeout());
+        parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
+
+        // Bulk API only supports newline delimited JSON or Smile. Before executing
+        // the bulk, we need to check that all requests have the same content-type
+        // and this content-type is supported by the Bulk API.
+        XContentType bulkContentType = null;
+        for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
+            DocWriteRequest<?> request = bulkRequest.requests().get(i);
+
+            DocWriteRequest.OpType opType = request.opType();
+            if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
+                bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType);
+
+            } else if (opType == DocWriteRequest.OpType.UPDATE) {
+                UpdateRequest updateRequest = (UpdateRequest) request;
+                if (updateRequest.doc() != null) {
+                    bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
+                }
+                if (updateRequest.upsertRequest() != null) {
+                    bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType);
+                }
+            }
+        }
+
+        if (bulkContentType == null) {
+            bulkContentType = XContentType.JSON;
+        }
+
+        byte separator = bulkContentType.xContent().streamSeparator();
+        ContentType requestContentType = ContentType.create(bulkContentType.mediaType());
+
+        ByteArrayOutputStream content = new ByteArrayOutputStream();
+        for (DocWriteRequest<?> request : bulkRequest.requests()) {
+            DocWriteRequest.OpType opType = request.opType();
+
+            try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
+                metadata.startObject();
+                {
+                    metadata.startObject(opType.getLowercase());
+                    if (Strings.hasLength(request.index())) {
+                        metadata.field("_index", request.index());
+                    }
+                    if (Strings.hasLength(request.type())) {
+                        metadata.field("_type", request.type());
+                    }
+                    if (Strings.hasLength(request.id())) {
+                        metadata.field("_id", request.id());
+                    }
+                    if (Strings.hasLength(request.routing())) {
+                        metadata.field("_routing", request.routing());
+                    }
+                    if (Strings.hasLength(request.parent())) {
+                        metadata.field("_parent", request.parent());
+                    }
+                    if (request.version() != Versions.MATCH_ANY) {
+                        metadata.field("_version", request.version());
+                    }
+
+                    VersionType versionType = request.versionType();
+                    if (versionType != VersionType.INTERNAL) {
+                        if (versionType == VersionType.EXTERNAL) {
+                            metadata.field("_version_type", "external");
+                        } else if (versionType == VersionType.EXTERNAL_GTE) {
+                            metadata.field("_version_type", "external_gte");
+                        } else if (versionType == VersionType.FORCE) {
+                            metadata.field("_version_type", "force");
+                        }
+                    }
+
+                    if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
+                        IndexRequest indexRequest = (IndexRequest) request;
+                        if (Strings.hasLength(indexRequest.getPipeline())) {
+                            metadata.field("pipeline", indexRequest.getPipeline());
+                        }
+                    } else if (opType == DocWriteRequest.OpType.UPDATE) {
+                        UpdateRequest updateRequest = (UpdateRequest) request;
+                        if (updateRequest.retryOnConflict() > 0) {
+                            metadata.field("_retry_on_conflict", updateRequest.retryOnConflict());
+                        }
+                        if (updateRequest.fetchSource() != null) {
+                            metadata.field("_source", updateRequest.fetchSource());
+                        }
+                    }
+                    metadata.endObject();
+                }
+                metadata.endObject();
+
+                BytesRef metadataSource = metadata.bytes().toBytesRef();
+                content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length);
+                content.write(separator);
+            }
+
+            BytesRef source = null;
+            if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
+                IndexRequest indexRequest = (IndexRequest) request;
+                BytesReference indexSource = indexRequest.source();
+                XContentType indexXContentType = indexRequest.getContentType();
+
+                try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, indexSource, indexXContentType)) {
+                    try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) {
+                        builder.copyCurrentStructure(parser);
+                        source = builder.bytes().toBytesRef();
+                    }
+                }
+            } else if (opType == DocWriteRequest.OpType.UPDATE) {
+                source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef();
+            }
+
+            if (source != null) {
+                content.write(source.bytes, source.offset, source.length);
+                content.write(separator);
+            }
+        }
+
+        HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType);
+        return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity);
+    }
+
     static Request exists(GetRequest getRequest) {
         Request request = get(getRequest);
         return new Request(HttpHead.METHOD_NAME, request.endpoint, request.params, null);
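Note: a hedged illustration (not part of the commit) of the body this converter produces: one metadata line per action, then the source line for index/create/update actions, each line terminated by the content type's stream separator ('\n' for JSON). The index, type, and id values are placeholders.

    // Self-contained Java sketch of the newline-delimited payload sent to /_bulk.
    StringBuilder body = new StringBuilder();
    body.append("{\"index\":{\"_index\":\"posts\",\"_type\":\"doc\",\"_id\":\"1\"}}\n");
    body.append("{\"user\":\"kimchy\"}\n");
    body.append("{\"update\":{\"_index\":\"posts\",\"_type\":\"doc\",\"_id\":\"1\",\"_retry_on_conflict\":3}}\n");
    body.append("{\"doc\":{\"user\":\"other\"}}\n");
    byte[] entityBytes = body.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8);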
@@ -135,6 +267,48 @@ final class Request {
         return new Request("HEAD", "/", Collections.emptyMap(), null);
     }
 
+    static Request update(UpdateRequest updateRequest) throws IOException {
+        String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
+
+        Params parameters = Params.builder();
+        parameters.withRouting(updateRequest.routing());
+        parameters.withParent(updateRequest.parent());
+        parameters.withTimeout(updateRequest.timeout());
+        parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
+        parameters.withWaitForActiveShards(updateRequest.waitForActiveShards());
+        parameters.withDocAsUpsert(updateRequest.docAsUpsert());
+        parameters.withFetchSourceContext(updateRequest.fetchSource());
+        parameters.withRetryOnConflict(updateRequest.retryOnConflict());
+        parameters.withVersion(updateRequest.version());
+        parameters.withVersionType(updateRequest.versionType());
+
+        // The Java API allows update requests with different content types
+        // set for the partial document and the upsert document. This client
+        // only accepts update requests that have the same content types set
+        // for both doc and upsert.
+        XContentType xContentType = null;
+        if (updateRequest.doc() != null) {
+            xContentType = updateRequest.doc().getContentType();
+        }
+        if (updateRequest.upsertRequest() != null) {
+            XContentType upsertContentType = updateRequest.upsertRequest().getContentType();
+            if ((xContentType != null) && (xContentType != upsertContentType)) {
+                throw new IllegalStateException("Update request cannot have different content types for doc [" + xContentType + "]" +
+                        " and upsert [" + upsertContentType + "] documents");
+            } else {
+                xContentType = upsertContentType;
+            }
+        }
+        if (xContentType == null) {
+            xContentType = Requests.INDEX_CONTENT_TYPE;
+        }
+
+        BytesRef source = XContentHelper.toXContent(updateRequest, xContentType, false).toBytesRef();
+        HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType()));
+
+        return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity);
+    }
+
     /**
      * Utility method to build request's endpoint.
      */
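Note: a hedged sketch of a request this converter accepts: doc and upsert are both built as JSON, so the entity serializes with one content type; mixing JSON and Smile here would hit the IllegalStateException above. Index, type, and field names are placeholders.

    import java.io.IOException;
    import org.elasticsearch.action.update.UpdateRequest;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

    class UpdateRequestSketch {
        static UpdateRequest example() throws IOException {
            XContentBuilder doc = jsonBuilder().startObject().field("views", 1).endObject();
            XContentBuilder upsert = jsonBuilder().startObject().field("views", 0).endObject();
            return new UpdateRequest("posts", "doc", "1")
                    .doc(doc)            // partial document (JSON)
                    .upsert(upsert)      // initial document if id "1" is missing (JSON)
                    .retryOnConflict(3); // becomes the retry_on_conflict parameter
        }
    }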
@@ -177,6 +351,13 @@ final class Request {
             return this;
         }
 
+        Params withDocAsUpsert(boolean docAsUpsert) {
+            if (docAsUpsert) {
+                return putParam("doc_as_upsert", Boolean.TRUE.toString());
+            }
+            return this;
+        }
+
         Params withFetchSourceContext(FetchSourceContext fetchSourceContext) {
             if (fetchSourceContext != null) {
                 if (fetchSourceContext.fetchSource() == false) {
@@ -220,7 +401,14 @@ final class Request {
 
         Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
             if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
-                putParam("refresh", refreshPolicy.getValue());
+                return putParam("refresh", refreshPolicy.getValue());
             }
             return this;
         }
+
+        Params withRetryOnConflict(int retryOnConflict) {
+            if (retryOnConflict > 0) {
+                return putParam("retry_on_conflict", String.valueOf(retryOnConflict));
+            }
+            return this;
+        }
@@ -269,4 +457,26 @@ final class Request {
             return new Params();
         }
     }
+
+    /**
+     * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms
+     * to the current {@link BulkRequest}'s content type (if it's known at the time of this method get called).
+     *
+     * @return the {@link IndexRequest}'s content type
+     */
+    static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) {
+        XContentType requestContentType = indexRequest.getContentType();
+        if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) {
+            throw new IllegalArgumentException("Unsupported content-type found for request with content-type [" + requestContentType
+                    + "], only JSON and SMILE are supported");
+        }
+        if (xContentType == null) {
+            return requestContentType;
+        }
+        if (requestContentType != xContentType) {
+            throw new IllegalArgumentException("Mismatching content-type found for request with content-type [" + requestContentType
+                    + "], previous requests have content-type [" + xContentType + "]");
+        }
+        return xContentType;
+    }
 }
@@ -28,11 +28,15 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.main.MainRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -41,10 +45,8 @@ import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Objects;
 import java.util.Set;
-import java.util.function.Function;
 
 import static java.util.Collections.emptySet;
 import static java.util.Collections.singleton;
@@ -61,6 +63,24 @@ public class RestHighLevelClient {
         this.client = Objects.requireNonNull(client);
     }
 
+    /**
+     * Executes a bulk request using the Bulk API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
+     */
+    public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
+        return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously executes a bulk request using the Bulk API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
+     */
+    public void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
+        performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
+    }
+
     /**
      * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
     */
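Note: a hedged usage sketch for the two methods added above; client construction and request assembly are assumed to happen elsewhere.

    import java.io.IOException;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.elasticsearch.client.RestHighLevelClient;

    class BulkUsageSketch {
        static void sync(RestHighLevelClient client, BulkRequest request) throws IOException {
            BulkResponse response = client.bulk(request);
            if (response.hasFailures()) {
                System.err.println(response.buildFailureMessage());
            }
        }

        static void async(RestHighLevelClient client, BulkRequest request) {
            client.bulkAsync(request, new ActionListener<BulkResponse>() {
                @Override
                public void onResponse(BulkResponse response) {
                    System.out.println("took " + response.getTook());
                }

                @Override
                public void onFailure(Exception e) {
                    e.printStackTrace();
                }
            });
        }
    }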
@@ -123,6 +143,28 @@ public class RestHighLevelClient {
         performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers);
     }
 
+    /**
+     * Updates a document using the Update API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
+     */
+    public UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException {
+        return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously updates a document using the Update API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
+     */
+    public void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) {
+        performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers);
+    }
+
+    private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
+            CheckedFunction<Req, Request, IOException> requestConverter,
+            CheckedFunction<XContentParser, Resp, IOException> entityParser,
+            Set<Integer> ignores, Header... headers) throws IOException {
     /**
      * Deletes a document by id using the Delete api
      *
@@ -148,9 +190,10 @@ public class RestHighLevelClient {
         return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
     }
 
-    <Req extends ActionRequest, Resp> Resp performRequest(Req request, Function<Req, Request> requestConverter,
-            CheckedFunction<Response, Resp, IOException> responseConverter, Set<Integer> ignores, Header... headers) throws IOException {
-
+    <Req extends ActionRequest, Resp> Resp performRequest(Req request,
+            CheckedFunction<Req, Request, IOException> requestConverter,
+            CheckedFunction<Response, Resp, IOException> responseConverter,
+            Set<Integer> ignores, Header... headers) throws IOException {
         ActionRequestValidationException validationException = request.validate();
         if (validationException != null) {
             throw validationException;
@ -176,22 +219,31 @@ public class RestHighLevelClient {
|
|||
}
|
||||
}
|
||||
|
||||
private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, Function<Req, Request> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser, ActionListener<Resp> listener,
|
||||
Set<Integer> ignores, Header... headers) {
|
||||
private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
|
||||
performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser),
|
||||
listener, ignores, headers);
|
||||
}
|
||||
|
||||
<Req extends ActionRequest, Resp> void performRequestAsync(Req request, Function<Req, Request> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter, ActionListener<Resp> listener,
|
||||
Set<Integer> ignores, Header... headers) {
|
||||
<Req extends ActionRequest, Resp> void performRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
|
||||
ActionRequestValidationException validationException = request.validate();
|
||||
if (validationException != null) {
|
||||
listener.onFailure(validationException);
|
||||
return;
|
||||
}
|
||||
Request req = requestConverter.apply(request);
|
||||
Request req;
|
||||
try {
|
||||
req = requestConverter.apply(request);
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
return;
|
||||
}
|
||||
|
||||
ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
|
||||
client.performRequestAsync(req.method, req.endpoint, req.params, req.entity, responseListener, headers);
|
||||
}
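
Note the behavioral change in performRequestAsync above: the requestConverter call is now wrapped in try/catch, so a failure while converting the request reaches the listener instead of escaping from the calling thread. A hedged sketch of what a caller observes, reusing the mixed content-type failure that Request.update is shown to raise in the tests below ("client" is again an assumed RestHighLevelClient):

    UpdateRequest broken = new UpdateRequest("index", "type", "1");
    broken.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON));
    broken.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML));

    client.updateAsync(broken, new ActionListener<UpdateResponse>() {
        @Override
        public void onResponse(UpdateResponse response) {
            // never reached for this request
        }

        @Override
        public void onFailure(Exception e) {
            // the IllegalStateException thrown by the request converter is
            // delivered here rather than propagating out of updateAsync
        }
    });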

@@ -27,22 +27,32 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import static org.hamcrest.CoreMatchers.containsString;
import static java.util.Collections.singletonMap;

public class CrudIT extends ESRestHighLevelClientTestCase {

@@ -142,10 +152,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
        }
        {
            GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1);
            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
                    () -> execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
            assertEquals(RestStatus.BAD_REQUEST, exception.status());
            assertThat(exception.getMessage(), containsString("/index/type/does_not_exist?version=1: HTTP/1.1 400 Bad Request"));
            assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
        }
    }

@@ -344,4 +351,253 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
                    "version conflict, document already exists (current version [1])]", exception.getMessage());
        }
    }

    public void testUpdate() throws IOException {
        {
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "does_not_exist");
            updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));

            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
                    execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync));
            assertEquals(RestStatus.NOT_FOUND, exception.status());
            assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]",
                    exception.getMessage());
        }
        {
            IndexRequest indexRequest = new IndexRequest("index", "type", "id");
            indexRequest.source(singletonMap("field", "value"));
            IndexResponse indexResponse = highLevelClient().index(indexRequest);
            assertEquals(RestStatus.CREATED, indexResponse.status());

            UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
            updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.OK, updateResponse.status());
            assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion());

            UpdateRequest updateRequestConflict = new UpdateRequest("index", "type", "id");
            updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values()));
            updateRequestConflict.version(indexResponse.getVersion());

            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
                    execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync));
            assertEquals(RestStatus.CONFLICT, exception.status());
            assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " +
                    "current version [2] is different than the one provided [1]]", exception.getMessage());
        }
        {
            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
                UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
                updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));
                if (randomBoolean()) {
                    updateRequest.parent("missing");
                } else {
                    updateRequest.routing("missing");
                }
                execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            });

            assertEquals(RestStatus.NOT_FOUND, exception.status());
            assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][id]: document missing]",
                    exception.getMessage());
        }
        {
            IndexRequest indexRequest = new IndexRequest("index", "type", "with_script");
            indexRequest.source(singletonMap("counter", 12));
            IndexResponse indexResponse = highLevelClient().index(indexRequest);
            assertEquals(RestStatus.CREATED, indexResponse.status());

            UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script");
            Script script = new Script(ScriptType.INLINE, "painless", "ctx._source.counter += params.count", singletonMap("count", 8));
            updateRequest.script(script);
            updateRequest.fetchSource(true);

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.OK, updateResponse.status());
            assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
            assertEquals(2L, updateResponse.getVersion());
            assertEquals(20, updateResponse.getGetResult().sourceAsMap().get("counter"));

        }
        {
            IndexRequest indexRequest = new IndexRequest("index", "type", "with_doc");
            indexRequest.source("field_1", "one", "field_3", "three");
            indexRequest.version(12L);
            indexRequest.versionType(VersionType.EXTERNAL);
            IndexResponse indexResponse = highLevelClient().index(indexRequest);
            assertEquals(RestStatus.CREATED, indexResponse.status());
            assertEquals(12L, indexResponse.getVersion());

            UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc");
            updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values()));
            updateRequest.fetchSource("field_*", "field_3");

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.OK, updateResponse.status());
            assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
            assertEquals(13L, updateResponse.getVersion());
            GetResult getResult = updateResponse.getGetResult();
            assertEquals(13L, updateResponse.getVersion());
            Map<String, Object> sourceAsMap = getResult.sourceAsMap();
            assertEquals("one", sourceAsMap.get("field_1"));
            assertEquals("two", sourceAsMap.get("field_2"));
            assertFalse(sourceAsMap.containsKey("field_3"));
        }
        {
            IndexRequest indexRequest = new IndexRequest("index", "type", "noop");
            indexRequest.source("field", "value");
            IndexResponse indexResponse = highLevelClient().index(indexRequest);
            assertEquals(RestStatus.CREATED, indexResponse.status());
            assertEquals(1L, indexResponse.getVersion());

            UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop");
            updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.OK, updateResponse.status());
            assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult());
            assertEquals(1L, updateResponse.getVersion());

            updateRequest.detectNoop(false);

            updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.OK, updateResponse.status());
            assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
            assertEquals(2L, updateResponse.getVersion());
        }
        {
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_upsert");
            updateRequest.upsert(singletonMap("doc_status", "created"));
            updateRequest.doc(singletonMap("doc_status", "updated"));
            updateRequest.fetchSource(true);

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.CREATED, updateResponse.status());
            assertEquals("index", updateResponse.getIndex());
            assertEquals("type", updateResponse.getType());
            assertEquals("with_upsert", updateResponse.getId());
            GetResult getResult = updateResponse.getGetResult();
            assertEquals(1L, updateResponse.getVersion());
            assertEquals("created", getResult.sourceAsMap().get("doc_status"));
        }
        {
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc_as_upsert");
            updateRequest.doc(singletonMap("field", "initialized"));
            updateRequest.fetchSource(true);
            updateRequest.docAsUpsert(true);

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.CREATED, updateResponse.status());
            assertEquals("index", updateResponse.getIndex());
            assertEquals("type", updateResponse.getType());
            assertEquals("with_doc_as_upsert", updateResponse.getId());
            GetResult getResult = updateResponse.getGetResult();
            assertEquals(1L, updateResponse.getVersion());
            assertEquals("initialized", getResult.sourceAsMap().get("field"));
        }
        {
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_scripted_upsert");
            updateRequest.fetchSource(true);
            updateRequest.script(new Script(ScriptType.INLINE, "painless", "ctx._source.level = params.test", singletonMap("test", "C")));
            updateRequest.scriptedUpsert(true);
            updateRequest.upsert(singletonMap("level", "A"));

            UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            assertEquals(RestStatus.CREATED, updateResponse.status());
            assertEquals("index", updateResponse.getIndex());
            assertEquals("type", updateResponse.getType());
            assertEquals("with_scripted_upsert", updateResponse.getId());

            GetResult getResult = updateResponse.getGetResult();
            assertEquals(1L, updateResponse.getVersion());
            assertEquals("C", getResult.sourceAsMap().get("level"));
        }
        {
            IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
                UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
                updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON));
                updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML));
                execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
            });
            assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
                    exception.getMessage());
        }
    }

    public void testBulk() throws IOException {
        int nbItems = randomIntBetween(10, 100);
        boolean[] errors = new boolean[nbItems];

        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);

        BulkRequest bulkRequest = new BulkRequest();
        for (int i = 0; i < nbItems; i++) {
            String id = String.valueOf(i);
            boolean erroneous = randomBoolean();
            errors[i] = erroneous;

            DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
            if (opType == DocWriteRequest.OpType.DELETE) {
                if (erroneous == false) {
                    assertEquals(RestStatus.CREATED,
                            highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                }
                DeleteRequest deleteRequest = new DeleteRequest("index", "test", id);
                bulkRequest.add(deleteRequest);

            } else {
                BytesReference source = XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject().bytes();
                if (opType == DocWriteRequest.OpType.INDEX) {
                    IndexRequest indexRequest = new IndexRequest("index", "test", id).source(source, xContentType);
                    if (erroneous) {
                        indexRequest.version(12L);
                    }
                    bulkRequest.add(indexRequest);

                } else if (opType == DocWriteRequest.OpType.CREATE) {
                    IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true);
                    if (erroneous) {
                        assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status());
                    }
                    bulkRequest.add(createRequest);

                } else if (opType == DocWriteRequest.OpType.UPDATE) {
                    UpdateRequest updateRequest = new UpdateRequest("index", "test", id)
                            .doc(new IndexRequest().source(source, xContentType));
                    if (erroneous == false) {
                        assertEquals(RestStatus.CREATED,
                                highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                    }
                    bulkRequest.add(updateRequest);
                }
            }
        }

        BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync);
        assertEquals(RestStatus.OK, bulkResponse.status());
        assertTrue(bulkResponse.getTookInMillis() > 0);
        assertEquals(nbItems, bulkResponse.getItems().length);

        for (int i = 0; i < nbItems; i++) {
            BulkItemResponse bulkItemResponse = bulkResponse.getItems()[i];

            assertEquals(i, bulkItemResponse.getItemId());
            assertEquals("index", bulkItemResponse.getIndex());
            assertEquals("test", bulkItemResponse.getType());
            assertEquals(String.valueOf(i), bulkItemResponse.getId());

            DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType();
            if (requestOpType == DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) {
                assertEquals(errors[i], bulkItemResponse.isFailed());
                assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.CREATED, bulkItemResponse.status());
            } else if (requestOpType == DocWriteRequest.OpType.UPDATE) {
                assertEquals(errors[i], bulkItemResponse.isFailed());
                assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.OK, bulkItemResponse.status());
            } else if (requestOpType == DocWriteRequest.OpType.DELETE) {
                assertFalse(bulkItemResponse.isFailed());
                assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
            }
        }
    }
}
@@ -23,26 +23,40 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;

import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Request.enforceSameContentType;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;

public class RequestTests extends ESTestCase {

    public void testPing() {

@@ -153,43 +167,7 @@ public class RequestTests extends ESTestCase {
            expectedParams.put("stored_fields", storedFieldsParam.toString());
        }
        if (randomBoolean()) {
            if (randomBoolean()) {
                boolean fetchSource = randomBoolean();
                getRequest.fetchSourceContext(new FetchSourceContext(fetchSource));
                if (fetchSource == false) {
                    expectedParams.put("_source", "false");
                }
            } else {
                int numIncludes = randomIntBetween(0, 5);
                String[] includes = new String[numIncludes];
                StringBuilder includesParam = new StringBuilder();
                for (int i = 0; i < numIncludes; i++) {
                    String include = randomAsciiOfLengthBetween(3, 10);
                    includes[i] = include;
                    includesParam.append(include);
                    if (i < numIncludes - 1) {
                        includesParam.append(",");
                    }
                }
                if (numIncludes > 0) {
                    expectedParams.put("_source_include", includesParam.toString());
                }
                int numExcludes = randomIntBetween(0, 5);
                String[] excludes = new String[numExcludes];
                StringBuilder excludesParam = new StringBuilder();
                for (int i = 0; i < numExcludes; i++) {
                    String exclude = randomAsciiOfLengthBetween(3, 10);
                    excludes[i] = exclude;
                    excludesParam.append(exclude);
                    if (i < numExcludes - 1) {
                        excludesParam.append(",");
                    }
                }
                if (numExcludes > 0) {
                    expectedParams.put("_source_exclude", excludesParam.toString());
                }
                getRequest.fetchSourceContext(new FetchSourceContext(true, includes, excludes));
            }
            randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams);
        }
    }
        Request request = requestConverter.apply(getRequest);

@@ -269,6 +247,325 @@ public class RequestTests extends ESTestCase {
        }
    }

    public void testUpdate() throws IOException {
        XContentType xContentType = randomFrom(XContentType.values());

        Map<String, String> expectedParams = new HashMap<>();
        String index = randomAsciiOfLengthBetween(3, 10);
        String type = randomAsciiOfLengthBetween(3, 10);
        String id = randomAsciiOfLengthBetween(3, 10);

        UpdateRequest updateRequest = new UpdateRequest(index, type, id);
        updateRequest.detectNoop(randomBoolean());

        if (randomBoolean()) {
            BytesReference source = RandomObjects.randomSource(random(), xContentType);
            updateRequest.doc(new IndexRequest().source(source, xContentType));

            boolean docAsUpsert = randomBoolean();
            updateRequest.docAsUpsert(docAsUpsert);
            if (docAsUpsert) {
                expectedParams.put("doc_as_upsert", "true");
            }
        } else {
            updateRequest.script(new Script("_value + 1"));
            updateRequest.scriptedUpsert(randomBoolean());
        }
        if (randomBoolean()) {
            BytesReference source = RandomObjects.randomSource(random(), xContentType);
            updateRequest.upsert(new IndexRequest().source(source, xContentType));
        }
        if (randomBoolean()) {
            String routing = randomAsciiOfLengthBetween(3, 10);
            updateRequest.routing(routing);
            expectedParams.put("routing", routing);
        }
        if (randomBoolean()) {
            String parent = randomAsciiOfLengthBetween(3, 10);
            updateRequest.parent(parent);
            expectedParams.put("parent", parent);
        }
        if (randomBoolean()) {
            String timeout = randomTimeValue();
            updateRequest.timeout(timeout);
            expectedParams.put("timeout", timeout);
        } else {
            expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
        }
        if (randomBoolean()) {
            WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
            updateRequest.setRefreshPolicy(refreshPolicy);
            if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
                expectedParams.put("refresh", refreshPolicy.getValue());
            }
        }
        if (randomBoolean()) {
            int waitForActiveShards = randomIntBetween(0, 10);
            updateRequest.waitForActiveShards(waitForActiveShards);
            expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
        }
        if (randomBoolean()) {
            long version = randomLong();
            updateRequest.version(version);
            if (version != Versions.MATCH_ANY) {
                expectedParams.put("version", Long.toString(version));
            }
        }
        if (randomBoolean()) {
            VersionType versionType = randomFrom(VersionType.values());
            updateRequest.versionType(versionType);
            if (versionType != VersionType.INTERNAL) {
                expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
            }
        }
        if (randomBoolean()) {
            int retryOnConflict = randomIntBetween(0, 5);
            updateRequest.retryOnConflict(retryOnConflict);
            if (retryOnConflict > 0) {
                expectedParams.put("retry_on_conflict", String.valueOf(retryOnConflict));
            }
        }
        if (randomBoolean()) {
            randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams);
        }

        Request request = Request.update(updateRequest);
        assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.endpoint);
        assertEquals(expectedParams, request.params);
        assertEquals("POST", request.method);

        HttpEntity entity = request.entity;
        assertNotNull(entity);
        assertTrue(entity instanceof ByteArrayEntity);

        UpdateRequest parsedUpdateRequest = new UpdateRequest();

        XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue());
        try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) {
            parsedUpdateRequest.fromXContent(parser);
        }

        assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert());
        assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert());
        assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop());
        assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
        assertEquals(updateRequest.script(), parsedUpdateRequest.script());
        if (updateRequest.doc() != null) {
            assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
        } else {
            assertNull(parsedUpdateRequest.doc());
        }
        if (updateRequest.upsertRequest() != null) {
            assertToXContentEquivalent(updateRequest.upsertRequest().source(), parsedUpdateRequest.upsertRequest().source(), xContentType);
        } else {
            assertNull(parsedUpdateRequest.upsertRequest());
        }
    }

    public void testUpdateWithDifferentContentTypes() throws IOException {
        IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
            UpdateRequest updateRequest = new UpdateRequest();
            updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
            updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML));
            Request.update(updateRequest);
        });
        assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
                exception.getMessage());
    }

    public void testBulk() throws IOException {
        Map<String, String> expectedParams = new HashMap<>();

        BulkRequest bulkRequest = new BulkRequest();
        if (randomBoolean()) {
            String timeout = randomTimeValue();
            bulkRequest.timeout(timeout);
            expectedParams.put("timeout", timeout);
        } else {
            expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep());
        }

        if (randomBoolean()) {
            WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
            bulkRequest.setRefreshPolicy(refreshPolicy);
            if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
                expectedParams.put("refresh", refreshPolicy.getValue());
            }
        }

        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);

        int nbItems = randomIntBetween(10, 100);
        for (int i = 0; i < nbItems; i++) {
            String index = randomAsciiOfLength(5);
            String type = randomAsciiOfLength(5);
            String id = randomAsciiOfLength(5);

            BytesReference source = RandomObjects.randomSource(random(), xContentType);
            DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());

            DocWriteRequest<?> docWriteRequest = null;
            if (opType == DocWriteRequest.OpType.INDEX) {
                IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType);
                docWriteRequest = indexRequest;
                if (randomBoolean()) {
                    indexRequest.setPipeline(randomAsciiOfLength(5));
                }
                if (randomBoolean()) {
                    indexRequest.parent(randomAsciiOfLength(5));
                }
            } else if (opType == DocWriteRequest.OpType.CREATE) {
                IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true);
                docWriteRequest = createRequest;
                if (randomBoolean()) {
                    createRequest.parent(randomAsciiOfLength(5));
                }
            } else if (opType == DocWriteRequest.OpType.UPDATE) {
                final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType));
                docWriteRequest = updateRequest;
                if (randomBoolean()) {
                    updateRequest.retryOnConflict(randomIntBetween(1, 5));
                }
                if (randomBoolean()) {
                    randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>());
                }
                if (randomBoolean()) {
                    updateRequest.parent(randomAsciiOfLength(5));
                }
            } else if (opType == DocWriteRequest.OpType.DELETE) {
                docWriteRequest = new DeleteRequest(index, type, id);
            }

            if (randomBoolean()) {
                docWriteRequest.routing(randomAsciiOfLength(10));
            }
            if (randomBoolean()) {
                docWriteRequest.version(randomNonNegativeLong());
            }
            if (randomBoolean()) {
                docWriteRequest.versionType(randomFrom(VersionType.values()));
            }
            bulkRequest.add(docWriteRequest);
        }

        Request request = Request.bulk(bulkRequest);
        assertEquals("/_bulk", request.endpoint);
        assertEquals(expectedParams, request.params);
        assertEquals("POST", request.method);

        byte[] content = new byte[(int) request.entity.getContentLength()];
        try (InputStream inputStream = request.entity.getContent()) {
            Streams.readFully(inputStream, content);
        }

        BulkRequest parsedBulkRequest = new BulkRequest();
        parsedBulkRequest.add(content, 0, content.length, xContentType);
        assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions());

        for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
            DocWriteRequest<?> originalRequest = bulkRequest.requests().get(i);
            DocWriteRequest<?> parsedRequest = parsedBulkRequest.requests().get(i);

            assertEquals(originalRequest.opType(), parsedRequest.opType());
            assertEquals(originalRequest.index(), parsedRequest.index());
            assertEquals(originalRequest.type(), parsedRequest.type());
            assertEquals(originalRequest.id(), parsedRequest.id());
            assertEquals(originalRequest.routing(), parsedRequest.routing());
            assertEquals(originalRequest.parent(), parsedRequest.parent());
            assertEquals(originalRequest.version(), parsedRequest.version());
            assertEquals(originalRequest.versionType(), parsedRequest.versionType());

            DocWriteRequest.OpType opType = originalRequest.opType();
            if (opType == DocWriteRequest.OpType.INDEX) {
                IndexRequest indexRequest = (IndexRequest) originalRequest;
                IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest;

                assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline());
                assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType);
            } else if (opType == DocWriteRequest.OpType.UPDATE) {
                UpdateRequest updateRequest = (UpdateRequest) originalRequest;
                UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest;

                assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict());
                assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
                if (updateRequest.doc() != null) {
                    assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
                } else {
                    assertNull(parsedUpdateRequest.doc());
                }
            }
        }
    }
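
For reference, the entity this test round-trips uses the newline-delimited bulk wire format: one action metadata line, followed by a source line for index, create and update operations. In its JSON flavour the body looks roughly like this (values illustrative, not taken from the test):

    {"index":{"_index":"index","_type":"test","_id":"0"}}
    {"id":0}
    {"update":{"_index":"index","_type":"test","_id":"1"}}
    {"doc":{"id":1}}
    {"delete":{"_index":"index","_type":"test","_id":"2"}}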

    public void testBulkWithDifferentContentTypes() throws IOException {
        {
            BulkRequest bulkRequest = new BulkRequest();
            bulkRequest.add(new DeleteRequest("index", "type", "0"));
            bulkRequest.add(new UpdateRequest("index", "type", "1").script(new Script("test")));
            bulkRequest.add(new DeleteRequest("index", "type", "2"));

            Request request = Request.bulk(bulkRequest);
            assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue());
        }
        {
            XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
            BulkRequest bulkRequest = new BulkRequest();
            bulkRequest.add(new DeleteRequest("index", "type", "0"));
            bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType));
            bulkRequest.add(new DeleteRequest("index", "type", "2"));

            Request request = Request.bulk(bulkRequest);
            assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue());
        }
        {
            XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "0");
            if (randomBoolean()) {
                updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), xContentType));
            } else {
                updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType));
            }

            Request request = Request.bulk(new BulkRequest().add(updateRequest));
            assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue());
        }
        {
            BulkRequest bulkRequest = new BulkRequest();
            bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE));
            bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
            assertEquals("Mismatching content-type found for request with content-type [JSON], " +
                    "previous requests have content-type [SMILE]", exception.getMessage());
        }
        {
            BulkRequest bulkRequest = new BulkRequest();
            bulkRequest.add(new IndexRequest("index", "type", "0")
                    .source(singletonMap("field", "value"), XContentType.JSON));
            bulkRequest.add(new IndexRequest("index", "type", "1")
                    .source(singletonMap("field", "value"), XContentType.JSON));
            bulkRequest.add(new UpdateRequest("index", "type", "2")
                    .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
                    .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))
            );
            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
            assertEquals("Mismatching content-type found for request with content-type [SMILE], " +
                    "previous requests have content-type [JSON]", exception.getMessage());
        }
        {
            XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML);
            BulkRequest bulkRequest = new BulkRequest();
            bulkRequest.add(new DeleteRequest("index", "type", "0"));
            bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
            bulkRequest.add(new DeleteRequest("index", "type", "2"));
            bulkRequest.add(new DeleteRequest("index", "type", "3"));
            bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON));
            bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType));
            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
            assertEquals("Unsupported content-type found for request with content-type [" + xContentType
                    + "], only JSON and SMILE are supported", exception.getMessage());
        }
    }

    public void testParams() {
        final int nbParams = randomIntBetween(0, 10);
        Request.Params params = Request.Params.builder();

@@ -304,6 +601,78 @@
        assertEquals("/a/b", Request.endpoint("a", "b"));
        assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create"));
        assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create"));
        assertEquals("/a/_create", Request.endpoint("a", null, null, "_create"));
    }

    public void testEnforceSameContentType() {
        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
        IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType);
        assertEquals(xContentType, enforceSameContentType(indexRequest, null));
        assertEquals(xContentType, enforceSameContentType(indexRequest, xContentType));

        XContentType bulkContentType = randomBoolean() ? xContentType : null;

        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
                enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType));
        assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported",
                exception.getMessage());

        exception = expectThrows(IllegalArgumentException.class, () ->
                enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType));
        assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported",
                exception.getMessage());

        XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON;

        exception = expectThrows(IllegalArgumentException.class, () ->
                enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType));
        assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], "
                + "previous requests have content-type [" + xContentType + "]", exception.getMessage());
    }
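
The assertions above pin down the helper's contract: the first body-carrying request decides the bulk content type, only JSON and SMILE are accepted, and any later mismatch fails fast. A hedged sketch of how a request converter can apply it (illustrative of that contract, not a copy of the actual Request.bulk implementation):

    static XContentType resolveBulkContentType(BulkRequest bulkRequest) {
        XContentType bulkContentType = null;
        for (DocWriteRequest<?> action : bulkRequest.requests()) {
            if (action instanceof IndexRequest) {
                // first call returns the request's own type, later calls throw on a mismatch
                bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType);
            }
        }
        // a bulk consisting only of deletes carries no body and defaults to JSON
        return bulkContentType == null ? XContentType.JSON : bulkContentType;
    }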

    /**
     * Randomize the {@link FetchSourceContext} request parameters.
     */
    private static void randomizeFetchSourceContextParams(Consumer<FetchSourceContext> consumer, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                boolean fetchSource = randomBoolean();
                consumer.accept(new FetchSourceContext(fetchSource));
                if (fetchSource == false) {
                    expectedParams.put("_source", "false");
                }
            } else {
                int numIncludes = randomIntBetween(0, 5);
                String[] includes = new String[numIncludes];
                StringBuilder includesParam = new StringBuilder();
                for (int i = 0; i < numIncludes; i++) {
                    String include = randomAsciiOfLengthBetween(3, 10);
                    includes[i] = include;
                    includesParam.append(include);
                    if (i < numIncludes - 1) {
                        includesParam.append(",");
                    }
                }
                if (numIncludes > 0) {
                    expectedParams.put("_source_include", includesParam.toString());
                }
                int numExcludes = randomIntBetween(0, 5);
                String[] excludes = new String[numExcludes];
                StringBuilder excludesParam = new StringBuilder();
                for (int i = 0; i < numExcludes; i++) {
                    String exclude = randomAsciiOfLengthBetween(3, 10);
                    excludes[i] = exclude;
                    excludesParam.append(exclude);
                    if (i < numExcludes - 1) {
                        excludesParam.append(",");
                    }
                }
                if (numExcludes > 0) {
                    expectedParams.put("_source_exclude", excludesParam.toString());
                }
                consumer.accept(new FetchSourceContext(true, includes, excludes));
            }
        }
    }
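
Concretely, the helper exercises the mapping from a FetchSourceContext onto URL parameters; a few representative pairs (values illustrative):

    // new FetchSourceContext(false)                                     -> _source=false
    // new FetchSourceContext(true, new String[]{"abc"}, new String[0])  -> _source_include=abc
    // new FetchSourceContext(true, new String[0], new String[]{"xyz"})  -> _source_exclude=xyz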

    private void enrichReplicationRequest(ReplicatedWriteRequest request, Map<String, String> expectedParams) {

@@ -27,7 +27,6 @@ import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

@@ -144,7 +143,7 @@ public class RestHighLevelClientTests extends ESTestCase {
        }
        {
            IllegalStateException ise = expectThrows(IllegalStateException.class,
                    () -> RestHighLevelClient.parseEntity(new BasicHttpEntity(), null));
                    () -> RestHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null));
            assertEquals("Elasticsearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage());
        }
        {
@@ -240,7 +239,8 @@

    public void testPerformRequestOnSuccess() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);

@@ -261,7 +261,8 @@

    public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);

@@ -278,7 +279,8 @@

    public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",

@@ -297,7 +299,8 @@

    public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));

@@ -316,7 +319,8 @@

    public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));

@@ -335,7 +339,8 @@

    public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);

@@ -348,7 +353,8 @@

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);

@@ -364,7 +370,8 @@

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
                ContentType.APPLICATION_JSON));
@@ -49,8 +49,9 @@ dependencies {
}

forbiddenApisMain {
  //client does not depend on core, so only jdk signatures should be checked
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
  //client does not depend on core, so only jdk and http signatures should be checked
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
                    PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}

forbiddenApisTest {

@@ -58,7 +59,8 @@ forbiddenApisTest {
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
  //client does not depend on core, so only jdk signatures should be checked
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
                    PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}
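
The added http-signatures ruleset is what motivates the entity changes throughout the rest of this commit: every HttpEntity is now built with an explicit ContentType. A hedged Java illustration of the before/after pattern, assuming the signatures file forbids the ContentType-less constructors (the file's exact contents are not shown in this diff):

    import java.io.UnsupportedEncodingException;
    import org.apache.http.entity.ContentType;
    import org.apache.http.entity.StringEntity;

    static void entityExamples() throws UnsupportedEncodingException {
        // Would be flagged: the content type silently defaults to text/plain.
        StringEntity implicit = new StringEntity("{ \"field\": \"value\" }");

        // Accepted: the entity carries an explicit Content-Type header.
        StringEntity explicit = new StringEntity("{ \"field\": \"value\" }", ContentType.APPLICATION_JSON);
    }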

dependencyLicenses {

@@ -20,6 +20,7 @@
package org.elasticsearch.client;

import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;

@@ -32,6 +33,8 @@ import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.protocol.HttpContext;

import java.util.concurrent.atomic.AtomicReference;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
@@ -56,7 +59,7 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
        ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
        StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
        HttpResponse httpResponse = new BasicHttpResponse(statusLine);
        httpResponse.setEntity(new StringEntity("test"));
        httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN));

        //everything goes well
        consumer.responseReceived(httpResponse);

@@ -99,11 +102,17 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
        StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
        consumer.onResponseReceived(new BasicHttpResponse(statusLine));

        BasicHttpEntity entity = new BasicHttpEntity();
        entity.setContentLength(randomInt(bufferLimit));
        final AtomicReference<Long> contentLength = new AtomicReference<>();
        HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) {
            @Override
            public long getContentLength() {
                return contentLength.get();
            }
        };
        contentLength.set(randomLong(bufferLimit));
        consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);

        entity.setContentLength(randomIntBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
        contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
        try {
            consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
        } catch(ContentTooLongException e) {
@@ -31,6 +31,7 @@ import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;

@@ -71,20 +72,21 @@ public class RequestLoggerTests extends RestClientTestCase {
        HttpEntity entity;
        switch(randomIntBetween(0, 4)) {
        case 0:
            entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
            entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
            break;
        case 1:
            entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
            entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)),
                    ContentType.APPLICATION_JSON);
            break;
        case 2:
            entity = new NStringEntity(requestBody, StandardCharsets.UTF_8);
            entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
            break;
        case 3:
            entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
            entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
            break;
        case 4:
            // Evil entity without a charset
            entity = new StringEntity(requestBody, (Charset) null);
            entity = new StringEntity(requestBody, ContentType.create("application/json", (Charset) null));
            break;
        default:
            throw new UnsupportedOperationException();

@@ -122,15 +124,16 @@ public class RequestLoggerTests extends RestClientTestCase {
        HttpEntity entity;
        switch(randomIntBetween(0, 2)) {
        case 0:
            entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
            entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON);
            break;
        case 1:
            //test a non repeatable entity
            entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
            entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)),
                    ContentType.APPLICATION_JSON);
            break;
        case 2:
            // Evil entity without a charset
            entity = new StringEntity(responseBody, (Charset) null);
            entity = new StringEntity(responseBody, ContentType.create("application/json", (Charset) null));
            break;
        default:
            throw new UnsupportedOperationException();
@@ -25,6 +25,7 @@ import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;

@@ -52,10 +53,11 @@ public class ResponseExceptionTests extends RestClientTestCase {
        if (hasBody) {
            HttpEntity entity;
            if (getRandom().nextBoolean()) {
                entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
                entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON);
            } else {
                //test a non repeatable entity
                entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
                entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)),
                        ContentType.APPLICATION_JSON);
            }
            httpResponse.setEntity(entity);
        }
@ -28,6 +28,7 @@ import org.apache.http.Header;
|
|||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.auth.AuthScope;
|
||||
import org.apache.http.auth.UsernamePasswordCredentials;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.impl.client.BasicCredentialsProvider;
|
||||
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
|
||||
|
@ -249,7 +250,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
|
|||
|
||||
private Response bodyTest(final RestClient restClient, final String method) throws IOException {
|
||||
String requestBody = "{ \"field\": \"value\" }";
|
||||
StringEntity entity = new StringEntity(requestBody);
|
||||
StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
|
||||
int statusCode = randomStatusCode(getRandom());
|
||||
Response esResponse;
|
||||
try {
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.http.client.protocol.HttpClientContext;
|
|||
import org.apache.http.client.utils.URIBuilder;
|
||||
import org.apache.http.concurrent.FutureCallback;
|
||||
import org.apache.http.conn.ConnectTimeoutException;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.impl.auth.BasicScheme;
|
||||
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
|
||||
|
@ -293,7 +294,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
|||
*/
|
||||
public void testBody() throws IOException {
|
||||
String body = "{ \"field\": \"value\" }";
|
||||
StringEntity entity = new StringEntity(body);
|
||||
StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON);
|
||||
for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
|
||||
for (int okStatusCode : getOkStatusCodes()) {
|
||||
Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity);
|
||||
|
@ -431,7 +432,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
|||
HttpEntity entity = null;
|
||||
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
|
||||
if (hasBody) {
|
||||
entity = new StringEntity(randomAsciiOfLengthBetween(10, 100));
|
||||
entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON);
|
||||
((HttpEntityEnclosingRequest) request).setEntity(entity);
|
||||
}
|
||||
|
||||
|
|
|
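The test hunks above all make the same move: charset-only entity constructors are replaced with ContentType-aware ones. A minimal sketch of what that buys, assuming Apache HttpCore 4.x on the classpath (the wrapper class name is hypothetical):

import java.nio.charset.StandardCharsets;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

public class EntityContentTypeSketch {
    public static void main(String[] args) {
        // Charset-only constructor: HttpCore defaults the media type to
        // text/plain, so the request carries no JSON content type at all.
        StringEntity charsetOnly = new StringEntity("{ \"field\": \"value\" }", StandardCharsets.UTF_8);
        System.out.println(charsetOnly.getContentType()); // Content-Type: text/plain; charset=UTF-8

        // ContentType-aware constructor: the entity declares application/json explicitly.
        StringEntity typed = new StringEntity("{ \"field\": \"value\" }", ContentType.APPLICATION_JSON);
        System.out.println(typed.getContentType()); // Content-Type: application/json; charset=UTF-8
    }
}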
@@ -74,7 +74,7 @@ dependencies {
  // percentiles aggregation
  compile 'com.tdunning:t-digest:3.0'
  // precentil ranks aggregation
  compile 'org.hdrhistogram:HdrHistogram:2.1.6'
  compile 'org.hdrhistogram:HdrHistogram:2.1.9'

  // lucene spatial
  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional

@@ -1 +0,0 @@
7495feb7f71ee124bd2a7e7d83590e296d71d80e

@@ -0,0 +1 @@
e4631ce165eb400edecfa32e03d3f1be53dee754
@@ -586,7 +586,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
            return new ElasticsearchException(buildMessage("exception", parser.text(), null));
        }

        ensureExpectedToken(token, XContentParser.Token.START_OBJECT, parser::getTokenLocation);
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
        token = parser.nextToken();

        // Root causes are parsed in the innerFromXContent() and are added as suppressed exceptions.
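The one-line swap above matters because the helper's parameters are (expected, actual): with the arguments reversed, a mismatch still throws, but the error message names the wrong token as the expected one. A small sketch of the corrected calling pattern (the wrapper class is hypothetical):

import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentParser;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

final class TokenOrderSketch {
    // Advances the parser and fails fast, with a correctly worded message,
    // when the next token is not the start of an object.
    static void expectStartObject(XContentParser parser) throws IOException {
        XContentParser.Token token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
    }
}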
@@ -288,7 +288,6 @@ import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.rest.action.document.RestDeleteAction;
import org.elasticsearch.rest.action.document.RestGetAction;
import org.elasticsearch.rest.action.document.RestGetSourceAction;
import org.elasticsearch.rest.action.document.RestHeadAction;
import org.elasticsearch.rest.action.document.RestIndexAction;
import org.elasticsearch.rest.action.document.RestMultiGetAction;
import org.elasticsearch.rest.action.document.RestMultiTermVectorsAction;

@@ -563,7 +562,6 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestIndexAction(settings, restController));
        registerHandler.accept(new RestGetAction(settings, restController));
        registerHandler.accept(new RestGetSourceAction(settings, restController));
        registerHandler.accept(new RestHeadAction.Document(settings, restController));
        registerHandler.accept(new RestMultiGetAction(settings, restController));
        registerHandler.accept(new RestDeleteAction(settings, restController));
        registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController));
@@ -23,15 +23,15 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;

@@ -39,14 +39,12 @@ import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.util.Locale;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;

/**
 * A base class for the response of a write operation that involves a single doc

@@ -199,6 +197,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
    }

    /** returns the rest status for this response (based on {@link ShardInfo#status()} */
    @Override
    public RestStatus status() {
        return getShardInfo().status();
    }

@@ -299,16 +298,115 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
    }

    /**
     * Declare the {@link ObjectParser} fields to use when parsing a {@link DocWriteResponse}
     * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
     *
     * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
     * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
     * if needed and then immediately returns.
     */
    protected static void declareParserFields(ConstructingObjectParser<? extends DocWriteResponse, Void> objParser) {
        objParser.declareString(constructorArg(), new ParseField(_INDEX));
        objParser.declareString(constructorArg(), new ParseField(_TYPE));
        objParser.declareString(constructorArg(), new ParseField(_ID));
        objParser.declareLong(constructorArg(), new ParseField(_VERSION));
        objParser.declareString(constructorArg(), new ParseField(RESULT));
        objParser.declareObject(optionalConstructorArg(), (p, c) -> ShardInfo.fromXContent(p), new ParseField(_SHARDS));
        objParser.declareLong(optionalConstructorArg(), new ParseField(_SEQ_NO));
        objParser.declareBoolean(DocWriteResponse::setForcedRefresh, new ParseField(FORCED_REFRESH));
    protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);

        String currentFieldName = parser.currentName();
        token = parser.nextToken();

        if (token.isValue()) {
            if (_INDEX.equals(currentFieldName)) {
                // index uuid and shard id are unknown and can't be parsed back for now.
                context.setShardId(new ShardId(new Index(parser.text(), IndexMetaData.INDEX_UUID_NA_VALUE), -1));
            } else if (_TYPE.equals(currentFieldName)) {
                context.setType(parser.text());
            } else if (_ID.equals(currentFieldName)) {
                context.setId(parser.text());
            } else if (_VERSION.equals(currentFieldName)) {
                context.setVersion(parser.longValue());
            } else if (RESULT.equals(currentFieldName)) {
                String result = parser.text();
                for (Result r : Result.values()) {
                    if (r.getLowercase().equals(result)) {
                        context.setResult(r);
                        break;
                    }
                }
            } else if (FORCED_REFRESH.equals(currentFieldName)) {
                context.setForcedRefresh(parser.booleanValue());
            } else if (_SEQ_NO.equals(currentFieldName)) {
                context.setSeqNo(parser.longValue());
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (_SHARDS.equals(currentFieldName)) {
                context.setShardInfo(ShardInfo.fromXContent(parser));
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        } else {
            throwUnknownToken(token, parser.getTokenLocation());
        }
    }
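Since parseInnerToXContent consumes exactly one field per call, a subclass parser is expected to drive it from a token loop. A hedged sketch of that loop, shaped after the parseXContentFields helpers referenced elsewhere in this commit (the method below is illustrative, not taken from the diff):

// As it might appear inside a DocWriteResponse subclass, where the
// protected parseInnerToXContent helper is visible:
static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            parseInnerToXContent(parser, context); // consumes the field name and its value
        }
    }
}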
    /**
     * Base class of all {@link DocWriteResponse} builders. These {@link DocWriteResponse.Builder} are used during
     * xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to
     * instantiate the appropriate {@link DocWriteResponse} with the parsed values.
     */
    public abstract static class Builder {

        protected ShardId shardId = null;
        protected String type = null;
        protected String id = null;
        protected Long version = null;
        protected Result result = null;
        protected boolean forcedRefresh;
        protected ShardInfo shardInfo = null;
        protected Long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;

        public ShardId getShardId() {
            return shardId;
        }

        public void setShardId(ShardId shardId) {
            this.shardId = shardId;
        }

        public String getType() {
            return type;
        }

        public void setType(String type) {
            this.type = type;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public void setVersion(Long version) {
            this.version = version;
        }

        public void setResult(Result result) {
            this.result = result;
        }

        public void setForcedRefresh(boolean forcedRefresh) {
            this.forcedRefresh = forcedRefresh;
        }

        public void setShardInfo(ShardInfo shardInfo) {
            this.shardInfo = shardInfo;
        }

        public void setSeqNo(Long seqNo) {
            this.seqNo = seqNo;
        }

        public abstract DocWriteResponse build();
    }
}
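The abstract Builder above only accumulates state; each concrete response supplies the build() step. A hypothetical minimal subclass, for illustration only (MyDocResponse and its constructor are invented, not part of the diff):

// Invented response type, shown only to illustrate the builder contract.
static class MyDocResponseBuilder extends DocWriteResponse.Builder {
    @Override
    public DocWriteResponse build() {
        // Hand the accumulated fields to the concrete response; the parsed
        // ShardInfo, when present, is attached after construction.
        MyDocResponse response = new MyDocResponse(shardId, type, id, seqNo, version, result);
        response.setForcedRefresh(forcedRefresh);
        if (shardInfo != null) {
            response.setShardInfo(shardInfo);
        }
        return response;
    }
}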
@@ -139,19 +139,6 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
        return this;
    }

    /**
     * Sets the repository settings.
     *
     * @param source repository settings in json or yaml format
     * @return this request
     * @deprecated use {@link #settings(String, XContentType)} to avoid content type auto-detection
     */
    @Deprecated
    public PutRepositoryRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets the repository settings.
     *

@@ -89,19 +89,6 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutR
        return this;
    }

    /**
     * Sets the repository settings in Json or Yaml format
     *
     * @param source repository settings
     * @return this builder
     * @deprecated use {@link #setSettings(String, XContentType)} instead to avoid content type auto detection
     */
    @Deprecated
    public PutRepositoryRequestBuilder setSettings(String source) {
        request.settings(source);
        return this;
    }

    /**
     * Sets the repository settings in Json or Yaml format
     *
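The removed overloads here and in the hunks that follow share one pattern: a String-only setter that sniffed the content type. The surviving replacements make the type explicit. A hedged usage sketch (repository name and settings body invented):

import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.common.xcontent.XContentType;

final class ExplicitTypeSketch {
    static PutRepositoryRequest repositoryRequest() {
        // Passing XContentType.JSON skips the removed auto-detection path.
        return new PutRepositoryRequest("my_backup")
                .type("fs")
                .settings("{\"location\": \"/mount/backups/my_backup\"}", XContentType.JSON);
    }
}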
@@ -81,16 +81,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
        return this;
    }

    /**
     * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
     * @deprecated use {@link #transientSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public ClusterUpdateSettingsRequest transientSettings(String source) {
        this.transientSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
     */

@@ -130,16 +120,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
        return this;
    }

    /**
     * Sets the source containing the persistent settings to be updated. They will get applied cross restarts
     * @deprecated use {@link #persistentSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public ClusterUpdateSettingsRequest persistentSettings(String source) {
        this.persistentSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets the source containing the persistent settings to be updated. They will get applied cross restarts
     */

@@ -51,16 +51,6 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil
        return this;
    }

    /**
     * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
     * @deprecated use {@link #setTransientSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public ClusterUpdateSettingsRequestBuilder setTransientSettings(String settings) {
        request.transientSettings(settings);
        return this;
    }

    /**
     * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
     */

@@ -93,16 +83,6 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil
        return this;
    }

    /**
     * Sets the source containing the persistent settings to be updated. They will get applied cross restarts
     * @deprecated use {@link #setPersistentSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public ClusterUpdateSettingsRequestBuilder setPersistentSettings(String settings) {
        request.persistentSettings(settings);
        return this;
    }

    /**
     * Sets the source containing the persistent settings to be updated. They will get applied cross restarts
     */
@@ -287,21 +287,6 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
        return this;
    }

    /**
     * Sets repository-specific snapshot settings in JSON or YAML format
     * <p>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this request
     * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public CreateSnapshotRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets repository-specific snapshot settings in JSON or YAML format
     * <p>

@@ -141,21 +141,6 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil
        return this;
    }

    /**
     * Sets repository-specific snapshot settings in YAML, JSON or properties format
     * <p>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this builder
     * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public CreateSnapshotRequestBuilder setSettings(String source) {
        request.settings(source);
        return this;
    }

    /**
     * Sets repository-specific snapshot settings in YAML or JSON format
     * <p>

@@ -312,21 +312,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
        return this;
    }

    /**
     * Sets repository-specific restore settings in JSON or YAML format
     * <p>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this request
     * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public RestoreSnapshotRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets repository-specific restore settings in JSON or YAML format
     * <p>

@@ -450,16 +435,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
        return this;
    }

    /**
     * Sets settings that should be added/changed in all restored indices
     * @deprecated use {@link #indexSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public RestoreSnapshotRequest indexSettings(String source) {
        this.indexSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets settings that should be added/changed in all restored indices
     */

@@ -153,21 +153,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui
        return this;
    }

    /**
     * Sets repository-specific restore settings in JSON or YAML format
     * <p>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this builder
     * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public RestoreSnapshotRequestBuilder setSettings(String source) {
        request.settings(source);
        return this;
    }

    /**
     * Sets repository-specific restore settings in JSON or YAML format
     * <p>

@@ -263,19 +248,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui
        return this;
    }

    /**
     * Sets index settings that should be added or replaced during restore
     *
     * @param source index settings
     * @return this builder
     * @deprecated use {@link #setIndexSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public RestoreSnapshotRequestBuilder setIndexSettings(String source) {
        request.indexSettings(source);
        return this;
    }

    /**
     * Sets index settings that should be added or replaced during restore
     *
@@ -45,11 +45,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        super();
    }

    @Deprecated
    public PutStoredScriptRequest(String id, String lang, BytesReference content) {
        this(id, lang, content, XContentFactory.xContentType(content));
    }

    public PutStoredScriptRequest(String id, String lang, BytesReference content, XContentType xContentType) {
        super();
        this.id = id;

@@ -107,15 +102,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        return xContentType;
    }

    /**
     * Set the script source using bytes.
     * @deprecated this method is deprecated as it relies on content type detection. Use {@link #content(BytesReference, XContentType)}
     */
    @Deprecated
    public PutStoredScriptRequest content(BytesReference content) {
        return content(content, XContentFactory.xContentType(content));
    }

    /**
     * Set the script source and the content type of the bytes.
     */

@@ -36,16 +36,6 @@ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<Pu
        return this;
    }

    /**
     * Set the source of the script.
     * @deprecated this method requires content type detection. Use {@link #setContent(BytesReference, XContentType)} instead
     */
    @Deprecated
    public PutStoredScriptRequestBuilder setContent(BytesReference content) {
        request.content(content);
        return this;
    }

    /**
     * Set the source of the script along with the content type of the source
     */
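The four-argument constructor kept above is the non-deprecated path; the three-argument one only existed to sniff the type. A hedged sketch of constructing the request with an explicit type (the id, lang and script body are invented):

import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

final class StoredScriptSketch {
    static PutStoredScriptRequest scoreScript() {
        // The explicit XContentType.JSON replaces XContentFactory.xContentType(content).
        return new PutStoredScriptRequest("calculate-score", "painless",
                new BytesArray("{\"script\": \"Math.log(2 + doc['rating'].value)\"}"), XContentType.JSON);
    }
}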
@@ -170,16 +170,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return this;
    }

    /**
     * The settings to create the index with (either json or yaml format)
     * @deprecated use {@link #source(String, XContentType)} instead to avoid content type detection
     */
    @Deprecated
    public CreateIndexRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * The settings to create the index with (either json or yaml format)
     */

@@ -215,18 +205,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return this;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type The mapping type
     * @param source The mapping source
     * @deprecated use {@link #mapping(String, String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public CreateIndexRequest mapping(String type, String source) {
        return mapping(type, new BytesArray(source), XContentFactory.xContentType(source));
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *

@@ -362,15 +340,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #source(String, XContentType)}
     */
    @Deprecated
    public CreateIndexRequest source(String source) {
        return source(new BytesArray(source));
    }

    /**
     * Sets the settings and mappings as a single source.
     */

@@ -382,16 +351,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
     * Sets the settings and mappings as a single source.
     */
    public CreateIndexRequest source(XContentBuilder source) {
        return source(source.bytes());
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #source(byte[], XContentType)}
     */
    @Deprecated
    public CreateIndexRequest source(byte[] source) {
        return source(source, 0, source.length);
        return source(source.bytes(), source.contentType());
    }

    /**

@@ -401,15 +361,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return source(source, 0, source.length, xContentType);
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #source(byte[], int, int, XContentType)}
     */
    @Deprecated
    public CreateIndexRequest source(byte[] source, int offset, int length) {
        return source(new BytesArray(source, offset, length));
    }

    /**
     * Sets the settings and mappings as a single source.
     */

@@ -417,17 +368,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return source(new BytesArray(source, offset, length), xContentType);
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #source(BytesReference, XContentType)}
     */
    @Deprecated
    public CreateIndexRequest source(BytesReference source) {
        XContentType xContentType = XContentFactory.xContentType(source);
        source(source, xContentType);
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     */
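One change above is not a plain deletion: source(XContentBuilder) now forwards the builder's own content type instead of re-detecting it from the rendered bytes. A hedged usage sketch (index name and settings invented):

import java.io.IOException;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

final class CreateIndexSketch {
    static CreateIndexRequest singleShardIndex() throws IOException {
        XContentBuilder source = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("settings")
                        .field("number_of_shards", 1)
                    .endObject()
                .endObject();
        // source(XContentBuilder) now reads source.contentType() directly,
        // so no byte sniffing happens on this path.
        return new CreateIndexRequest("my-index").source(source);
    }
}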
@@ -110,19 +110,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        return this;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type The mapping type
     * @param source The mapping source
     * @deprecated use {@link #addMapping(String, String, XContentType)} to avoid content type auto-detection
     */
    @Deprecated
    public CreateIndexRequestBuilder addMapping(String type, String source) {
        request.mapping(type, source);
        return this;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *

@@ -214,16 +201,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #setSource(String, XContentType)}
     */
    @Deprecated
    public CreateIndexRequestBuilder setSource(String source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     */

@@ -232,16 +209,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #setSource(BytesReference, XContentType)}
     */
    @Deprecated
    public CreateIndexRequestBuilder setSource(BytesReference source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     */

@@ -250,16 +217,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #setSource(byte[], XContentType)}
     */
    @Deprecated
    public CreateIndexRequestBuilder setSource(byte[] source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     */

@@ -268,16 +225,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     * @deprecated use {@link #setSource(byte[], int, int, XContentType)}
     */
    @Deprecated
    public CreateIndexRequestBuilder setSource(byte[] source, int offset, int length) {
        request.source(source, offset, length);
        return this;
    }

    /**
     * Sets the settings and mappings as a single source.
     */
@@ -270,15 +270,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
        }
    }

    /**
     * The mapping source definition.
     * @deprecated use {@link #source(String, XContentType)}
     */
    @Deprecated
    public PutMappingRequest source(String mappingSource) {
        return source(mappingSource, XContentFactory.xContentType(mappingSource));
    }

    /**
     * The mapping source definition.
     */

@@ -81,16 +81,6 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
        return this;
    }

    /**
     * The mapping source definition.
     * @deprecated use {@link #setSource(String, XContentType)}
     */
    @Deprecated
    public PutMappingRequestBuilder setSource(String mappingSource) {
        request.source(mappingSource);
        return this;
    }

    /**
     * The mapping source definition.
     */
@@ -120,16 +120,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
        return this;
    }

    /**
     * Sets the settings to be updated (either json or yaml format)
     * @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public UpdateSettingsRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets the settings to be updated (either json or yaml format)
     */

@@ -70,16 +70,6 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<Upd
        return this;
    }

    /**
     * Sets the settings to be updated (either json or yaml format)
     * @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
     */
    @Deprecated
    public UpdateSettingsRequestBuilder setSettings(String source) {
        request.settings(source);
        return this;
    }

    /**
     * Sets the settings to be updated (either json or yaml format)
     */
@@ -180,16 +180,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return this;
    }

    /**
     * The settings to create the index template with (either json/yaml format).
     * @deprecated use {@link #settings(String, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest settings(String source) {
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

    /**
     * The settings to create the index template with (either json/yaml format).
     */

@@ -216,19 +206,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return this.settings;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type The mapping type
     * @param source The mapping source
     * @deprecated use {@link #mapping(String, String, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest mapping(String type, String source) {
        XContentType xContentType = XContentFactory.xContentType(source);
        return mapping(type, source, xContentType);
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *

@@ -385,15 +362,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return this;
    }

    /**
     * The template source definition.
     * @deprecated use {@link #source(String, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest source(String templateSource) {
        return source(XContentHelper.convertToMap(XContentFactory.xContent(templateSource), templateSource, true));
    }

    /**
     * The template source definition.
     */

@@ -401,15 +369,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return source(XContentHelper.convertToMap(xContentType.xContent(), templateSource, true));
    }

    /**
     * The template source definition.
     * @deprecated use {@link #source(byte[], XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest source(byte[] source) {
        return source(source, 0, source.length);
    }

    /**
     * The template source definition.
     */

@@ -417,15 +376,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return source(source, 0, source.length, xContentType);
    }

    /**
     * The template source definition.
     * @deprecated use {@link #source(byte[], int, int, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
        return source(new BytesArray(source, offset, length));
    }

    /**
     * The template source definition.
     */

@@ -433,15 +383,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return source(new BytesArray(source, offset, length), xContentType);
    }

    /**
     * The template source definition.
     * @deprecated use {@link #source(BytesReference, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequest source(BytesReference source) {
        return source(XContentHelper.convertToMap(source, true).v2());
    }

    /**
     * The template source definition.
     */

@@ -100,16 +100,6 @@ public class PutIndexTemplateRequestBuilder
        return this;
    }

    /**
     * The settings to crete the index template with (either json or yaml format)
     * @deprecated use {@link #setSettings(String, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder setSettings(String source) {
        request.settings(source);
        return this;
    }

    /**
     * The settings to crete the index template with (either json or yaml format)
     */

@@ -126,19 +116,6 @@ public class PutIndexTemplateRequestBuilder
        return this;
    }

    /**
     * Adds mapping that will be added when the index template gets created.
     *
     * @param type The mapping type
     * @param source The mapping source
     * @deprecated use {@link #addMapping(String, String, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder addMapping(String type, String source) {
        request.mapping(type, source);
        return this;
    }

    /**
     * Adds mapping that will be added when the index template gets created.
     *

@@ -249,16 +226,6 @@ public class PutIndexTemplateRequestBuilder
        return this;
    }

    /**
     * The template source definition.
     * @deprecated use {@link #setSource(BytesReference, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder setSource(String templateSource) {
        request.source(templateSource);
        return this;
    }

    /**
     * The template source definition.
     */

@@ -267,26 +234,6 @@ public class PutIndexTemplateRequestBuilder
        return this;
    }

    /**
     * The template source definition.
     * @deprecated use {@link #setSource(BytesReference, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource) {
        request.source(templateSource);
        return this;
    }

    /**
     * The template source definition.
     * @deprecated use {@link #setSource(byte[], XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder setSource(byte[] templateSource) {
        request.source(templateSource);
        return this;
    }

    /**
     * The template source definition.
     */

@@ -295,16 +242,6 @@ public class PutIndexTemplateRequestBuilder
        return this;
    }

    /**
     * The template source definition.
     * @deprecated use {@link #setSource(byte[], int, int, XContentType)}
     */
    @Deprecated
    public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, int offset, int length) {
        request.source(templateSource, offset, length);
        return this;
    }

    /**
     * The template source definition.
     */
@@ -19,7 +19,9 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -31,13 +33,12 @@ public class BulkItemRequest implements Streamable {
    private int id;
    private DocWriteRequest request;
    private volatile BulkItemResponse primaryResponse;
    private volatile boolean ignoreOnReplica;

    BulkItemRequest() {

    }

    public BulkItemRequest(int id, DocWriteRequest request) {
    protected BulkItemRequest(int id, DocWriteRequest request) {
        this.id = id;
        this.request = request;
    }

@@ -55,25 +56,16 @@ public class BulkItemRequest implements Streamable {
        return request.indices()[0];
    }

    BulkItemResponse getPrimaryResponse() {
    // NOTE: protected for testing only
    protected BulkItemResponse getPrimaryResponse() {
        return primaryResponse;
    }

    void setPrimaryResponse(BulkItemResponse primaryResponse) {
    // NOTE: protected for testing only
    protected void setPrimaryResponse(BulkItemResponse primaryResponse) {
        this.primaryResponse = primaryResponse;
    }

    /**
     * Marks this request to be ignored and *not* execute on a replica.
     */
    void setIgnoreOnReplica() {
        this.ignoreOnReplica = true;
    }

    boolean isIgnoreOnReplica() {
        return ignoreOnReplica;
    }

    public static BulkItemRequest readBulkItem(StreamInput in) throws IOException {
        BulkItemRequest item = new BulkItemRequest();
        item.readFrom(in);

@@ -87,14 +79,37 @@ public class BulkItemRequest implements Streamable {
        if (in.readBoolean()) {
            primaryResponse = BulkItemResponse.readBulkItem(in);
        }
        ignoreOnReplica = in.readBoolean();
        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
            boolean ignoreOnReplica = in.readBoolean();
            if (ignoreOnReplica == false && primaryResponse != null) {
                assert primaryResponse.isFailed() == false : "expected no failure on the primary response";
            }
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(id);
        DocWriteRequest.writeDocumentRequest(out, request);
        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
            // old nodes expect updated version and version type on the request
            if (primaryResponse != null) {
                request.version(primaryResponse.getVersion());
                request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
                DocWriteRequest.writeDocumentRequest(out, request);
            } else {
                DocWriteRequest.writeDocumentRequest(out, request);
            }
        } else {
            DocWriteRequest.writeDocumentRequest(out, request);
        }
        out.writeOptionalStreamable(primaryResponse);
        out.writeBoolean(ignoreOnReplica);
        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
            if (primaryResponse != null) {
                out.writeBoolean(primaryResponse.isFailed()
                        || primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP);
            } else {
                out.writeBoolean(false);
            }
        }
    }
}
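The readFrom/writeTo changes above are a wire-compatibility dance: the ignoreOnReplica flag is gone from the class, but streams to and from pre-6.0 nodes must still carry a boolean in its place. A generic sketch of that gating pattern (the class and field below are invented):

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class VersionGateSketch {
    private int id;

    void readFrom(StreamInput in) throws IOException {
        id = in.readVInt();
        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
            in.readBoolean(); // swallow the legacy flag an old node still sends
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(id);
        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
            out.writeBoolean(false); // old peers still expect the removed flag
        }
    }
}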
@@ -27,6 +27,7 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -35,16 +36,26 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;

/**
 * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
 * of the relevant action, and if it has failed or not (with the failure message incase it failed).
 */
public class BulkItemResponse implements Streamable, StatusToXContentObject {

    private static final String _INDEX = "_index";
    private static final String _TYPE = "_type";
    private static final String _ID = "_id";
    private static final String STATUS = "status";
    private static final String ERROR = "error";

    @Override
    public RestStatus status() {
        return failure == null ? response.status() : failure.getStatus();

@@ -56,13 +67,13 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
        builder.startObject(opType.getLowercase());
        if (failure == null) {
            response.innerToXContent(builder, params);
            builder.field(Fields.STATUS, response.status().getStatus());
            builder.field(STATUS, response.status().getStatus());
        } else {
            builder.field(Fields._INDEX, failure.getIndex());
            builder.field(Fields._TYPE, failure.getType());
            builder.field(Fields._ID, failure.getId());
            builder.field(Fields.STATUS, failure.getStatus().getStatus());
            builder.startObject(Fields.ERROR);
            builder.field(_INDEX, failure.getIndex());
            builder.field(_TYPE, failure.getType());
            builder.field(_ID, failure.getId());
            builder.field(STATUS, failure.getStatus().getStatus());
            builder.startObject(ERROR);
            ElasticsearchException.generateThrowableXContent(builder, params, failure.getCause());
            builder.endObject();
        }

@@ -71,12 +82,73 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
        return builder;
    }

    static final class Fields {
        static final String _INDEX = "_index";
        static final String _TYPE = "_type";
        static final String _ID = "_id";
        static final String STATUS = "status";
        static final String ERROR = "error";
    /**
     * Reads a {@link BulkItemResponse} from a {@link XContentParser}.
     *
     * @param parser the {@link XContentParser}
     * @param id the id to assign to the parsed {@link BulkItemResponse}. It is usually the index of
     *           the item in the {@link BulkResponse#getItems} array.
     */
    public static BulkItemResponse fromXContent(XContentParser parser, int id) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);

        XContentParser.Token token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);

        String currentFieldName = parser.currentName();
        token = parser.nextToken();

        final OpType opType = OpType.fromString(currentFieldName);
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);

        DocWriteResponse.Builder builder = null;
        CheckedConsumer<XContentParser, IOException> itemParser = null;

        if (opType == OpType.INDEX || opType == OpType.CREATE) {
            final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
            builder = indexResponseBuilder;
            itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);

        } else if (opType == OpType.UPDATE) {
            final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
            builder = updateResponseBuilder;
            itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);

        } else if (opType == OpType.DELETE) {
            final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
            builder = deleteResponseBuilder;
            itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
        } else {
            throwUnknownField(currentFieldName, parser.getTokenLocation());
        }

        ElasticsearchException exception = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            }

            if (ERROR.equals(currentFieldName)) {
                if (token == XContentParser.Token.START_OBJECT) {
                    exception = ElasticsearchException.fromXContent(parser);
                }
            } else if (STATUS.equals(currentFieldName) == false) {
                itemParser.accept(parser);
            }
        }

        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);
        token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);

        BulkItemResponse bulkItemResponse;
        if (exception != null) {
            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception);
            bulkItemResponse = new BulkItemResponse(id, opType, failure);
        } else {
            bulkItemResponse = new BulkItemResponse(id, opType, builder.build());
        }
        return bulkItemResponse;
    }

    /**
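A hedged sketch of the JSON shape fromXContent expects and how it might be driven. The document values are invented, and the createParser overload used here is an assumption about this codebase's XContent API:

import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

final class BulkItemParseSketch {
    static BulkItemResponse parseOne() throws Exception {
        // One item is a wrapper object whose single key is the operation type;
        // that key decides which response builder consumes the inner fields.
        String json = "{\"delete\": {\"_index\": \"test\", \"_type\": \"doc\", \"_id\": \"1\","
                + " \"_version\": 2, \"result\": \"deleted\","
                + " \"_shards\": {\"total\": 2, \"successful\": 1, \"failed\": 0},"
                + " \"status\": 200}}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json)) {
            parser.nextToken(); // fromXContent expects the parser on START_OBJECT
            return BulkItemResponse.fromXContent(parser, 0);
        }
    }
}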
@@ -289,15 +289,6 @@ public class BulkProcessor implements Closeable {
        executeIfNeeded();
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     * @deprecated use {@link #add(BytesReference, String, String, XContentType)} instead to avoid content type auto-detection
     */
    @Deprecated
    public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
        return add(data, defaultIndex, defaultType, null, null);
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     */

@@ -306,19 +297,6 @@ public class BulkProcessor implements Closeable {
        return add(data, defaultIndex, defaultType, null, null, xContentType);
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     * @deprecated use {@link #add(BytesReference, String, String, String, Object, XContentType)} instead to avoid content type
     * auto-detection
     */
    @Deprecated
    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
                                          @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
        bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
        executeIfNeeded();
        return this;
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     */
@@ -243,15 +243,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return sizeInBytes;
    }

    /**
     * Adds a framed data in binary format
     * @deprecated use {@link #add(byte[], int, int, XContentType)}
     */
    @Deprecated
    public BulkRequest add(byte[] data, int from, int length) throws IOException {
        return add(data, from, length, null, null);
    }

    /**
     * Adds a framed data in binary format
     */

@@ -259,15 +250,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return add(data, from, length, null, null, xContentType);
    }

    /**
     * Adds a framed data in binary format
     * @deprecated use {@link #add(byte[], int, int, String, String, XContentType)}
     */
    @Deprecated
    public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException {
        return add(new BytesArray(data, from, length), defaultIndex, defaultType);
    }

    /**
     * Adds a framed data in binary format
     */

@@ -276,16 +258,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType);
    }

    /**
     * Adds a framed data in binary format
     *
     * @deprecated use {@link #add(BytesReference, String, String, XContentType)}
     */
    @Deprecated
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException {
        return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
    }

    /**
     * Adds a framed data in binary format
     */

@@ -294,16 +266,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return add(data, defaultIndex, defaultType, null, null, null, null, null, true, xContentType);
    }

    /**
     * Adds a framed data in binary format
     *
     * @deprecated use {@link #add(BytesReference, String, String, boolean, XContentType)}
     */
    @Deprecated
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws IOException {
        return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
    }

    /**
     * Adds a framed data in binary format
     */

@@ -312,13 +274,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex, xContentType);
    }

    @Deprecated
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws IOException {
        XContentType xContentType = XContentFactory.xContentType(data);
        return add(data, defaultIndex, defaultType, defaultRouting, defaultFields, defaultFetchSourceContext, defaultPipeline, payload,
                allowExplicitIndex, xContentType);
    }

    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String
            defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String
            defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException {

@@ -432,7 +387,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        }
        line++;

        // order is important, we set parent after routing, so routing will be set to parent if not set explicitly
        // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks
        // of index request.
        if ("index".equals(action)) {
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a framed data in binary format
|
||||
* @deprecated use {@link #add(byte[], int, int, XContentType)}
|
||||
*/
|
||||
@Deprecated
|
||||
public BulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
|
||||
request.add(data, from, length, null, null);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a framed data in binary format
|
||||
*/
|
||||
|
@ -114,16 +104,6 @@ public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkRe
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a framed data in binary format
|
||||
* @deprecated use {@link #add(byte[], int, int, String, String, XContentType)}
|
||||
*/
|
||||
@Deprecated
|
||||
public BulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
|
||||
request.add(data, from, length, defaultIndex, defaultType);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a framed data in binary format
|
||||
*/
|
||||
|
|
|
@ -23,17 +23,32 @@ import org.elasticsearch.action.ActionResponse;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.StatusToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
|
||||
|
||||
/**
|
||||
* A response of a bulk execution. Holding a response for each item responding (in order) of the
|
||||
* bulk requests. Each item holds the index/type/id is operated on, and if it failed or not (with the
|
||||
* failure message).
|
||||
*/
|
||||
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {
|
||||
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse>, StatusToXContentObject {
|
||||
|
||||
private static final String ITEMS = "items";
|
||||
private static final String ERRORS = "errors";
|
||||
private static final String TOOK = "took";
|
||||
private static final String INGEST_TOOK = "ingest_took";
|
||||
|
||||
public static final long NO_INGEST_TOOK = -1L;
|
||||
|
||||
|
@@ -141,4 +156,61 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
        out.writeVLong(tookInMillis);
        out.writeZLong(ingestTookInMillis);
    }

    @Override
    public RestStatus status() {
        return RestStatus.OK;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(TOOK, tookInMillis);
        if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) {
            builder.field(INGEST_TOOK, ingestTookInMillis);
        }
        builder.field(ERRORS, hasFailures());
        builder.startArray(ITEMS);
        for (BulkItemResponse item : this) {
            item.toXContent(builder, params);
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }

    public static BulkResponse fromXContent(XContentParser parser) throws IOException {
        XContentParser.Token token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);

        long took = -1L;
        long ingestTook = NO_INGEST_TOOK;
        List<BulkItemResponse> items = new ArrayList<>();

        String currentFieldName = parser.currentName();
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue()) {
                if (TOOK.equals(currentFieldName)) {
                    took = parser.longValue();
                } else if (INGEST_TOOK.equals(currentFieldName)) {
                    ingestTook = parser.longValue();
                } else if (ERRORS.equals(currentFieldName) == false) {
                    throwUnknownField(currentFieldName, parser.getTokenLocation());
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                if (ITEMS.equals(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        items.add(BulkItemResponse.fromXContent(parser, items.size()));
                    }
                } else {
                    throwUnknownField(currentFieldName, parser.getTokenLocation());
                }
            } else {
                throwUnknownToken(token, parser.getTokenLocation());
            }
        }
        return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook);
    }
}
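Taken together, status(), toXContent() and fromXContent() let a bulk response round-trip through the REST layer. A minimal round-trip sketch; parser construction varies across versions, so the createParser call below is an assumption (NamedXContentRegistry.EMPTY should suffice since no named objects are parsed here):

    XContentBuilder builder = XContentFactory.jsonBuilder();
    bulkResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
    try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, builder.bytes())) {
        BulkResponse parsed = BulkResponse.fromXContent(parser);
        // the parsed copy carries the same took/errors/items information
        assert parsed.hasFailures() == bulkResponse.hasFailures();
    }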
@@ -36,7 +36,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
    public BulkShardRequest() {
    }

    BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
    public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
        super(shardId);
        this.items = items;
        setRefreshPolicy(refreshPolicy);
@@ -36,7 +36,8 @@ public class BulkShardResponse extends ReplicationResponse implements WriteRespo
    BulkShardResponse() {
    }

    BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) {
    // NOTE: public for testing only
    public BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) {
        this.shardId = shardId;
        this.responses = responses;
    }
@@ -104,14 +104,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
    public WritePrimaryResult<BulkShardRequest, BulkShardResponse> shardOperationOnPrimary(
            BulkShardRequest request, IndexShard primary) throws Exception {
        final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();

        long[] preVersions = new long[request.items().length];
        VersionType[] preVersionTypes = new VersionType[request.items().length];
        Translog.Location location = null;
        for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
            location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex);
            location = executeBulkItemRequest(metaData, primary, request, location, requestIndex);
        }

        BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
        BulkItemRequest[] items = request.items();
        for (int i = 0; i < items.length; i++) {
@@ -124,110 +120,73 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
    /** Executes bulk item requests and handles request execution exceptions */
    private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
                                                     BulkShardRequest request,
                                                     long[] preVersions, VersionType[] preVersionTypes,
                                                     Translog.Location location, int requestIndex) throws Exception {
        final DocWriteRequest itemRequest = request.items()[requestIndex].request();
        preVersions[requestIndex] = itemRequest.version();
        preVersionTypes[requestIndex] = itemRequest.versionType();
        DocWriteRequest.OpType opType = itemRequest.opType();
        try {
            // execute item request
            final Engine.Result operationResult;
            final DocWriteResponse response;
            final BulkItemRequest replicaRequest;
            switch (itemRequest.opType()) {
                case CREATE:
                case INDEX:
                    final IndexRequest indexRequest = (IndexRequest) itemRequest;
                    Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                    if (indexResult.hasFailure()) {
                        response = null;
                    } else {
                        // update the version on request so it will happen on the replicas
                        final long version = indexResult.getVersion();
                        indexRequest.version(version);
                        indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
                        indexRequest.setSeqNo(indexResult.getSeqNo());
                        assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
                        response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
                                indexResult.getVersion(), indexResult.isCreated());
                    }
                    operationResult = indexResult;
                    replicaRequest = request.items()[requestIndex];
                    break;
                case UPDATE:
                    UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
                            primary, metaData, request, requestIndex);
                    operationResult = updateResultHolder.operationResult;
                    response = updateResultHolder.response;
                    replicaRequest = updateResultHolder.replicaRequest;
                    break;
                case DELETE:
                    final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
                    Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
                    if (deleteResult.hasFailure()) {
                        response = null;
                    } else {
                        // update the request with the version so it will go to the replicas
                        deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
                        deleteRequest.version(deleteResult.getVersion());
                        deleteRequest.setSeqNo(deleteResult.getSeqNo());
                        assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
                        response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
                                deleteResult.getVersion(), deleteResult.isFound());
                    }
                    operationResult = deleteResult;
                    replicaRequest = request.items()[requestIndex];
                    break;
                default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
            }

            // update the bulk item request because update request execution can mutate the bulk item request
            request.items()[requestIndex] = replicaRequest;
            if (operationResult == null) { // in case of noop update operation
                assert response.getResult() == DocWriteResponse.Result.NOOP
                        : "only noop update can have null operation";
                replicaRequest.setIgnoreOnReplica();
                replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
            } else if (operationResult.hasFailure() == false) {
                location = locationToSync(location, operationResult.getTranslogLocation());
                BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
                replicaRequest.setPrimaryResponse(primaryResponse);
                // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
                primaryResponse.getResponse().setShardInfo(new ShardInfo());
            } else {
                DocWriteRequest docWriteRequest = replicaRequest.request();
                Exception failure = operationResult.getFailure();
                if (isConflictException(failure)) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
                            request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
                } else {
                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
                            request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
                }
                // if it's a conflict failure, and we already executed the request on a primary (and we execute it
                // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
                // then just use the response we got from the successful execution
                if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
                    replicaRequest.setIgnoreOnReplica();
                    replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
                            new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
                }
            }
            assert replicaRequest.getPrimaryResponse() != null;
            assert preVersionTypes[requestIndex] != null;
        } catch (Exception e) {
            // rethrow the failure if we are going to retry on primary and let parent failure to handle it
            if (retryPrimaryException(e)) {
                // restore updated versions...
                for (int j = 0; j < requestIndex; j++) {
                    DocWriteRequest docWriteRequest = request.items()[j].request();
                    docWriteRequest.version(preVersions[j]);
                    docWriteRequest.versionType(preVersionTypes[j]);
                }
            }
            throw e;
        final DocWriteRequest.OpType opType = itemRequest.opType();
        final Engine.Result operationResult;
        final DocWriteResponse response;
        final BulkItemRequest replicaRequest;
        switch (itemRequest.opType()) {
            case CREATE:
            case INDEX:
                final IndexRequest indexRequest = (IndexRequest) itemRequest;
                Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                response = indexResult.hasFailure() ? null :
                        new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
                                indexResult.getVersion(), indexResult.isCreated());
                operationResult = indexResult;
                replicaRequest = request.items()[requestIndex];
                break;
            case UPDATE:
                UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
                        primary, metaData, request, requestIndex);
                operationResult = updateResultHolder.operationResult;
                response = updateResultHolder.response;
                replicaRequest = updateResultHolder.replicaRequest;
                break;
            case DELETE:
                final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
                Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
                response = deleteResult.hasFailure() ? null :
                        new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
                                deleteResult.getVersion(), deleteResult.isFound());
                operationResult = deleteResult;
                replicaRequest = request.items()[requestIndex];
                break;
            default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
        }

        // update the bulk item request because update request execution can mutate the bulk item request
        request.items()[requestIndex] = replicaRequest;
        if (operationResult == null) { // in case of noop update operation
            assert response.getResult() == DocWriteResponse.Result.NOOP
                    : "only noop update can have null operation";
            replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
        } else if (operationResult.hasFailure() == false) {
            location = locationToSync(location, operationResult.getTranslogLocation());
            BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
            replicaRequest.setPrimaryResponse(primaryResponse);
            // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
            primaryResponse.getResponse().setShardInfo(new ShardInfo());
        } else {
            DocWriteRequest docWriteRequest = replicaRequest.request();
            Exception failure = operationResult.getFailure();
            if (isConflictException(failure)) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
                        request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
                        request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
            }
            // if it's a conflict failure, and we already executed the request on a primary (and we execute it
            // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
            // then just use the response we got from the successful execution
            if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
                replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
                        new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
            }
        }
        assert replicaRequest.getPrimaryResponse() != null;
        return location;
    }
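With the rollback try/catch gone, requests are no longer mutated with the primary's version and sequence number, so there is nothing to restore when the operation is retried; the replica-side methods further down instead read both values from the stored primary response. A minimal model of that hand-off (plain Java with hypothetical types, not the Elasticsearch API):

    // hypothetical model: the primary assigns seqNo/version, the replica replays them verbatim
    final class PrimaryResult {
        final long seqNo;
        final long version;
        PrimaryResult(long seqNo, long version) { this.seqNo = seqNo; this.version = version; }
    }

    final class ReplicaCopy {
        long seqNo;
        long version;
        // applying the primary's numbers keeps both copies convergent even if the
        // original request object carried stale or unset values
        void apply(PrimaryResult primary) {
            this.seqNo = primary.seqNo;
            this.version = primary.version;
        }
    }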
@@ -266,7 +225,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        final UpdateHelper.Result translate;
        // translate update request
        try {
            translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis);
            translate = updateHelper.prepare(updateRequest, primary, threadPool::absoluteTimeInMillis);
        } catch (Exception failure) {
            // we may fail translating an update to an index or delete operation
            // we use the index result to communicate failure while translating the update request
@@ -281,25 +240,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
                indexRequest.process(mappingMd, request.index());
                updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                if (updateOperationResult.hasFailure() == false) {
                    // update the version on request so it will happen on the replicas
                    final long version = updateOperationResult.getVersion();
                    indexRequest.version(version);
                    indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
                    indexRequest.setSeqNo(updateOperationResult.getSeqNo());
                    assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
                }
                break;
            case DELETED:
                DeleteRequest deleteRequest = translate.action();
                updateOperationResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
                if (updateOperationResult.hasFailure() == false) {
                    // update the request with the version so it will go to the replicas
                    deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
                    deleteRequest.version(updateOperationResult.getVersion());
                    deleteRequest.setSeqNo(updateOperationResult.getSeqNo());
                    assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
                }
                break;
            case NOOP:
                primary.noopUpdate(updateRequest.type());
@@ -348,10 +292,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
                break;
        }
        assert (replicaRequest.request() instanceof IndexRequest
                && ((IndexRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) ||
                (replicaRequest.request() instanceof DeleteRequest
                        && ((DeleteRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO);
        assert updateOperationResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO;
        // successful operation
        break; // out of retry loop
    } else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) {
@@ -367,20 +308,20 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        Translog.Location location = null;
        for (int i = 0; i < request.items().length; i++) {
            BulkItemRequest item = request.items()[i];
            if (item.isIgnoreOnReplica() == false) {
                assert item.getPrimaryResponse() != null : "expected primary response to be set for item [" + i + "] request [" + item.request() + "]";
                if (item.getPrimaryResponse().isFailed() == false &&
                        item.getPrimaryResponse().getResponse().getResult() != DocWriteResponse.Result.NOOP) {
                    DocWriteRequest docWriteRequest = item.request();
                    // ensure request version is updated for replica operation during request execution in the primary
                    assert docWriteRequest.versionType() == docWriteRequest.versionType().versionTypeForReplicationAndRecovery()
                            : "unexpected version in replica " + docWriteRequest.version();
                    DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
                    final Engine.Result operationResult;
                    try {
                        switch (docWriteRequest.opType()) {
                            case CREATE:
                            case INDEX:
                                operationResult = executeIndexRequestOnReplica((IndexRequest) docWriteRequest, replica);
                                operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica);
                                break;
                            case DELETE:
                                operationResult = executeDeleteRequestOnReplica((DeleteRequest) docWriteRequest, replica);
                                operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica);
                                break;
                            default:
                                throw new IllegalStateException("Unexpected request operation type on replica: "
@@ -426,17 +367,21 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     * Execute the given {@link IndexRequest} on a replica shard, throwing a
     * {@link RetryOnReplicaException} if the operation needs to be re-tried.
     */
    public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) throws IOException {
    public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException {
        final ShardId shardId = replica.shardId();
        SourceToParse sourceToParse =
            SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source(),
                request.getContentType()).routing(request.routing()).parent(request.parent());

        final Engine.Index operation;
        final long version = primaryResponse.getVersion();
        final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
        assert versionType.validateVersionForWrites(version);
        final long seqNo = primaryResponse.getSeqNo();
        try {
            operation = replica.prepareIndexOnReplica(sourceToParse, request.getSeqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
            operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry());
        } catch (MapperParsingException e) {
            return new Engine.IndexResult(e, request.version(), request.getSeqNo());
            return new Engine.IndexResult(e, version, seqNo);
        }
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
@@ -446,7 +391,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
    }

    /** Utility method to prepare an index operation on primary shards */
    static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
    private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
        SourceToParse sourceToParse =
            SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(),
                request.getContentType()).routing(request.routing()).parent(request.parent());
@@ -460,7 +405,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        try {
            operation = prepareIndexOperationOnPrimary(request, primary);
        } catch (MapperParsingException | IllegalArgumentException e) {
            return new Engine.IndexResult(e, request.version(), request.getSeqNo());
            return new Engine.IndexResult(e, request.version());
        }
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        final ShardId shardId = primary.shardId();
@@ -471,12 +416,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
            } catch (IllegalArgumentException e) {
                // throws IAE on conflicts merging dynamic mappings
                return new Engine.IndexResult(e, request.version(), request.getSeqNo());
                return new Engine.IndexResult(e, request.version());
            }
            try {
                operation = prepareIndexOperationOnPrimary(request, primary);
            } catch (MapperParsingException | IllegalArgumentException e) {
                return new Engine.IndexResult(e, request.version(), request.getSeqNo());
                return new Engine.IndexResult(e, request.version());
            }
            update = operation.parsedDoc().dynamicMappingsUpdate();
            if (update != null) {
@@ -487,14 +432,17 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        return primary.index(operation);
    }

    public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
    private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
        final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType());
        return primary.delete(delete);
    }

    public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) throws IOException {
    private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request, IndexShard replica) throws IOException {
        final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
        final long version = primaryResponse.getVersion();
        assert versionType.validateVersionForWrites(version);
        final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
            request.getSeqNo(), request.primaryTerm(), request.version(), request.versionType());
            primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
        return replica.delete(delete);
    }
}
@@ -20,19 +20,14 @@
package org.elasticsearch.action.delete;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

/**
 * The response of the delete action.
@@ -45,7 +40,6 @@ public class DeleteResponse extends DocWriteResponse {
    private static final String FOUND = "found";

    public DeleteResponse() {
    }

    public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean found) {
@@ -57,37 +51,6 @@ public class DeleteResponse extends DocWriteResponse {
        return result == Result.DELETED ? super.status() : RestStatus.NOT_FOUND;
    }

    @Override
    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(FOUND, result == Result.DELETED);
        super.innerToXContent(builder, params);
        return builder;
    }

    private static final ConstructingObjectParser<DeleteResponse, Void> PARSER;
    static {
        PARSER = new ConstructingObjectParser<>(DeleteResponse.class.getName(),
                args -> {
                    // index uuid and shard id are unknown and can't be parsed back for now.
                    ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1);
                    String type = (String) args[1];
                    String id = (String) args[2];
                    long version = (long) args[3];
                    ShardInfo shardInfo = (ShardInfo) args[5];
                    long seqNo = (args[6] != null) ? (long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                    boolean found = (boolean) args[7];
                    DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found);
                    deleteResponse.setShardInfo(shardInfo);
                    return deleteResponse;
                });
        DocWriteResponse.declareParserFields(PARSER);
        PARSER.declareBoolean(constructorArg(), new ParseField(FOUND));
    }

    public static DeleteResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
@@ -100,4 +63,61 @@ public class DeleteResponse extends DocWriteResponse {
        builder.append(",shards=").append(getShardInfo());
        return builder.append("]").toString();
    }

    @Override
    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(FOUND, result == Result.DELETED);
        super.innerToXContent(builder, params);
        return builder;
    }

    public static DeleteResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);

        Builder context = new Builder();
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            parseXContentFields(parser, context);
        }
        return context.build();
    }

    /**
     * Parse the current token and update the parsing context appropriately.
     */
    public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        String currentFieldName = parser.currentName();

        if (FOUND.equals(currentFieldName)) {
            if (token.isValue()) {
                context.setFound(parser.booleanValue());
            }
        } else {
            DocWriteResponse.parseInnerToXContent(parser, context);
        }
    }

    /**
     * Builder class for {@link DeleteResponse}. This builder is usually used during xcontent parsing to
     * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
     * instantiate the {@link DeleteResponse}.
     */
    public static class Builder extends DocWriteResponse.Builder {

        private boolean found = false;

        public void setFound(boolean found) {
            this.found = found;
        }

        @Override
        public DeleteResponse build() {
            DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found);
            deleteResponse.setForcedRefresh(forcedRefresh);
            if (shardInfo != null) {
                deleteResponse.setShardInfo(shardInfo);
            }
            return deleteResponse;
        }
    }
}
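Unlike the removed ConstructingObjectParser, the Builder accepts fields in whatever order they arrive and defers construction until the whole object has been read. An illustrative parse sequence (values are hypothetical; the inherited fields are filled by DocWriteResponse.parseInnerToXContent as they are encountered):

    DeleteResponse.Builder context = new DeleteResponse.Builder();
    context.setFound(true); // the parser saw "found": true
    // DocWriteResponse.parseInnerToXContent(parser, context) handles the inherited
    // fields (index, type, id, version, seqNo, shard info) in any order
    DeleteResponse response = context.build(); // constructed once, at END_OBJECT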
@@ -121,7 +121,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement

    /**
     * Constructs a new index request against the specific index and type. The
     * {@link #source(byte[])} must be set.
     * {@link #source(byte[], XContentType)} must be set.
     */
    public IndexRequest(String index, String type) {
        this.index = index;
@@ -316,16 +316,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        }
    }

    /**
     * Sets the document source to index.
     *
     * @deprecated use {@link #source(String, XContentType)}
     */
    @Deprecated
    public IndexRequest source(String source) {
        return source(new BytesArray(source), XContentFactory.xContentType(source));
    }

    /**
     * Sets the document source to index.
     *
@@ -383,16 +373,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        }
    }

    /**
     * Sets the document to index in bytes form.
     * @deprecated use {@link #source(BytesReference, XContentType)}
     */
    @Deprecated
    public IndexRequest source(BytesReference source) {
        return source(source, XContentFactory.xContentType(source));
    }

    /**
     * Sets the document to index in bytes form.
     */
@@ -402,15 +382,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        return this;
    }

    /**
     * Sets the document to index in bytes form.
     * @deprecated use {@link #source(byte[], XContentType)}
     */
    @Deprecated
    public IndexRequest source(byte[] source) {
        return source(source, 0, source.length);
    }

    /**
     * Sets the document to index in bytes form.
     */
@@ -418,20 +389,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        return source(source, 0, source.length, xContentType);
    }

    /**
     * Sets the document to index in bytes form (assumed to be safe to be used from different
     * threads).
     *
     * @param source The source to index
     * @param offset The offset in the byte array
     * @param length The length of the data
     * @deprecated use {@link #source(byte[], int, int, XContentType)}
     */
    @Deprecated
    public IndexRequest source(byte[] source, int offset, int length) {
        return source(new BytesArray(source, offset, length), XContentFactory.xContentType(source));
    }

    /**
     * Sets the document to index in bytes form (assumed to be safe to be used from different
     * threads).
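Every removed source(...) overload guessed the content type via XContentFactory.xContentType(...); the surviving overloads take it explicitly, as the @deprecated tags indicate. A minimal usage sketch (index and type names and the document body are hypothetical):

    IndexRequest request = new IndexRequest("index", "type");
    // the content type is passed explicitly instead of being sniffed from the payload
    request.source("{\"field\":\"value\"}", XContentType.JSON);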
@@ -80,16 +80,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
        return this;
    }

    /**
     * Sets the source.
     * @deprecated use {@link #setSource(BytesReference, XContentType)}
     */
    @Deprecated
    public IndexRequestBuilder setSource(BytesReference source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the source.
     */
@@ -118,19 +108,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
        return this;
    }

    /**
     * Sets the document source to index.
     * <p>
     * Note, it's preferable to either set it using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)}
     * or using the {@link #setSource(byte[], XContentType)}.
     * @deprecated use {@link #setSource(String, XContentType)}
     */
    @Deprecated
    public IndexRequestBuilder setSource(String source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the document source to index.
     * <p>
@@ -150,16 +127,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
        return this;
    }

    /**
     * Sets the document to index in bytes form.
     * @deprecated use {@link #setSource(byte[], XContentType)}
     */
    @Deprecated
    public IndexRequestBuilder setSource(byte[] source) {
        request.source(source);
        return this;
    }

    /**
     * Sets the document to index in bytes form.
     */
@@ -168,21 +135,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
        return this;
    }

    /**
     * Sets the document to index in bytes form (assumed to be safe to be used from different
     * threads).
     *
     * @param source The source to index
     * @param offset The offset in the byte array
     * @param length The length of the data
     * @deprecated use {@link #setSource(byte[], int, int, XContentType)}
     */
    @Deprecated
    public IndexRequestBuilder setSource(byte[] source, int offset, int length) {
        request.source(source, offset, length);
        return this;
    }

    /**
     * Sets the document to index in bytes form (assumed to be safe to be used from different
     * threads).
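The builder mirrors the request-level change. A sketch of the fluent replacement call, assuming a client instance in scope and hypothetical index/type/id values:

    client.prepareIndex("index", "type", "1")
        .setSource("{\"field\":\"value\"}", XContentType.JSON)
        .get();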
@@ -20,20 +20,15 @@
package org.elasticsearch.action.index;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

/**
 * A response of an index operation,
@@ -78,34 +73,53 @@ public class IndexResponse extends DocWriteResponse {
        return builder;
    }

    /**
     * ConstructingObjectParser used to parse the {@link IndexResponse}. We use an ObjectParser here
     * because most fields are parsed by the parent abstract class {@link DocWriteResponse} and it's
     * not easy to parse part of the fields in the parent class and other fields in the children class
     * using the usual streamed parsing method.
     */
    private static final ConstructingObjectParser<IndexResponse, Void> PARSER;
    static {
        PARSER = new ConstructingObjectParser<>(IndexResponse.class.getName(),
                args -> {
                    // index uuid and shard id are unknown and can't be parsed back for now.
                    ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1);
                    String type = (String) args[1];
                    String id = (String) args[2];
                    long version = (long) args[3];
                    ShardInfo shardInfo = (ShardInfo) args[5];
                    long seqNo = (args[6] != null) ? (long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                    boolean created = (boolean) args[7];
                    IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created);
                    indexResponse.setShardInfo(shardInfo);
                    return indexResponse;
                });
        DocWriteResponse.declareParserFields(PARSER);
        PARSER.declareBoolean(constructorArg(), new ParseField(CREATED));
    }

    public static IndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    public static IndexResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);

        Builder context = new Builder();
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            parseXContentFields(parser, context);
        }
        return context.build();
    }

    /**
     * Parse the current token and update the parsing context appropriately.
     */
    public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        String currentFieldName = parser.currentName();

        if (CREATED.equals(currentFieldName)) {
            if (token.isValue()) {
                context.setCreated(parser.booleanValue());
            }
        } else {
            DocWriteResponse.parseInnerToXContent(parser, context);
        }
    }

    /**
     * Builder class for {@link IndexResponse}. This builder is usually used during xcontent parsing to
     * temporarily store the parsed values, then the {@link Builder#build()} method is called to
     * instantiate the {@link IndexResponse}.
     */
    public static class Builder extends DocWriteResponse.Builder {

        private boolean created = false;

        public void setCreated(boolean created) {
            this.created = created;
        }

        @Override
        public IndexResponse build() {
            IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created);
            indexResponse.setForcedRefresh(forcedRefresh);
            if (shardInfo != null) {
                indexResponse.setShardInfo(shardInfo);
            }
            return indexResponse;
        }
    }
}
@@ -42,7 +42,6 @@ import org.elasticsearch.transport.Transport;

import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
@@ -61,7 +60,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
     **/
    private final Function<String, Transport.Connection> nodeIdToConnection;
    private final SearchTask task;
    private final AtomicArray<Result> results;
    private final SearchPhaseResults<Result> results;
    private final long clusterStateVersion;
    private final Map<String, AliasFilter> aliasFilter;
    private final Map<String, Float> concreteIndexBoosts;
@@ -76,7 +75,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
                            Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                            Executor executor, SearchRequest request,
                            ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
                            long clusterStateVersion, SearchTask task) {
                            long clusterStateVersion, SearchTask task, SearchPhaseResults<Result> resultConsumer) {
        super(name, request, shardsIts, logger);
        this.startTime = startTime;
        this.logger = logger;
@@ -87,9 +86,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
        this.listener = listener;
        this.nodeIdToConnection = nodeIdToConnection;
        this.clusterStateVersion = clusterStateVersion;
        results = new AtomicArray<>(shardsIts.size());
        this.concreteIndexBoosts = concreteIndexBoosts;
        this.aliasFilter = aliasFilter;
        this.results = resultConsumer;
    }

    /**
@@ -105,7 +104,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
     * This is the main entry point for a search. This method starts the search execution of the initial phase.
     */
    public final void start() {
        if (results.length() == 0) {
        if (getNumShards() == 0) {
            //no search shards to search on, bail with empty response
            //(it happens with search across _all with no indices around and consistent with broadcast operations)
            listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
@@ -130,8 +129,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
            onPhaseFailure(currentPhase, "all shards failed", null);
        } else {
            if (logger.isTraceEnabled()) {
                final String resultsFrom = results.asList().stream()
                    .map(r -> r.value.shardTarget().toString()).collect(Collectors.joining(","));
                final String resultsFrom = results.getSuccessfulResults()
                    .map(r -> r.shardTarget().toString()).collect(Collectors.joining(","));
                logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
                    currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion);
            }
@@ -178,7 +177,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
            synchronized (shardFailuresMutex) {
                shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it?
                if (shardFailures == null) { // still null so we are the first and create a new instance
                    shardFailures = new AtomicArray<>(results.length());
                    shardFailures = new AtomicArray<>(getNumShards());
                    this.shardFailures.set(shardFailures);
                }
            }
@@ -194,7 +193,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
            }
        }

        if (results.get(shardIndex) != null) {
        if (results.hasResult(shardIndex)) {
            assert failure == null : "shard failed before but shouldn't: " + failure;
            successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
        }
@@ -207,22 +206,22 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
     * @param exception the exception explaining or causing the phase failure
     */
    private void raisePhaseFailure(SearchPhaseExecutionException exception) {
        for (AtomicArray.Entry<Result> entry : results.asList()) {
        results.getSuccessfulResults().forEach((entry) -> {
            try {
                Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId());
                sendReleaseSearchContext(entry.value.id(), connection);
                Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId());
                sendReleaseSearchContext(entry.id(), connection);
            } catch (Exception inner) {
                inner.addSuppressed(exception);
                logger.trace("failed to release context", inner);
            }
        }
        });
        listener.onFailure(exception);
    }

    @Override
    public final void onShardSuccess(int shardIndex, Result result) {
        successfulOps.incrementAndGet();
        results.set(shardIndex, result);
        results.consumeResult(shardIndex, result);
        if (logger.isTraceEnabled()) {
            logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
        }
@@ -242,7 +241,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten

    @Override
    public final int getNumShards() {
        return results.length();
        return results.getNumShards();
    }

    @Override
@@ -262,7 +261,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten

    @Override
    public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
        return new SearchResponse(internalSearchResponse, scrollId, results.length(), successfulOps.get(),
        return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
            buildTookInMillis(), buildShardFailures());
    }

@@ -310,6 +309,5 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
     * executed shard request
     * @param context the search context for the next phase
     */
    protected abstract SearchPhase getNextPhase(AtomicArray<Result> results, SearchPhaseContext context);

    protected abstract SearchPhase getNextPhase(SearchPhaseResults<Result> results, SearchPhaseContext context);
}
@@ -19,7 +19,6 @@
package org.elasticsearch.action.search;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@@ -30,17 +29,13 @@ import org.elasticsearch.search.SearchShardTarget;
 * where the given index is used to set the result on the array.
 */
final class CountedCollector<R extends SearchPhaseResult> {
    private final AtomicArray<R> resultArray;
    private final ResultConsumer<R> resultConsumer;
    private final CountDown counter;
    private final Runnable onFinish;
    private final SearchPhaseContext context;

    CountedCollector(AtomicArray<R> resultArray, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
        if (expectedOps > resultArray.length()) {
            throw new IllegalStateException("unexpected number of operations. got: " + expectedOps + " but array size is: "
                + resultArray.length());
        }
        this.resultArray = resultArray;
    CountedCollector(ResultConsumer<R> resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
        this.resultConsumer = resultConsumer;
        this.counter = new CountDown(expectedOps);
        this.onFinish = onFinish;
        this.context = context;
@@ -63,7 +58,7 @@ final class CountedCollector<R extends SearchPhaseResult> {
    void onResult(int index, R result, SearchShardTarget target) {
        try {
            result.shardTarget(target);
            resultArray.set(index, result);
            resultConsumer.consume(index, result);
        } finally {
            countDown();
        }
@@ -80,4 +75,12 @@ final class CountedCollector<R extends SearchPhaseResult> {
            countDown();
        }
    }

    /**
     * A functional interface to plug in shard result consumers to this collector
     */
    @FunctionalInterface
    public interface ResultConsumer<R extends SearchPhaseResult> {
        void consume(int shardIndex, R result);
    }
}
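ResultConsumer decouples the collector from how results are stored: any (shardIndex, result) sink fits. The two call sites in this change pass method references, as the later hunks show; one stores raw results in an AtomicArray, the other feeds a SearchPhaseResults consumer:

    // fetch phase: plain array storage
    new CountedCollector<>(fetchResults::set, docIdsToLoad.length, finishPhase, context);

    // dfs query phase: feed the result-consumer abstraction instead
    new CountedCollector<>(queryResult::consumeResult, dfsSearchResults.asList().size(),
        () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), context);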
@@ -40,18 +40,19 @@ import java.util.function.Function;
 * @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
 */
final class DfsQueryPhase extends SearchPhase {
    private final AtomicArray<QuerySearchResultProvider> queryResult;
    private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> queryResult;
    private final SearchPhaseController searchPhaseController;
    private final AtomicArray<DfsSearchResult> dfsSearchResults;
    private final Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
    private final Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final SearchTransportService searchTransportService;

    DfsQueryPhase(AtomicArray<DfsSearchResult> dfsSearchResults,
                  SearchPhaseController searchPhaseController,
                  Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory, SearchPhaseContext context) {
                  Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory,
                  SearchPhaseContext context) {
        super("dfs_query");
        this.queryResult = new AtomicArray<>(dfsSearchResults.length());
        this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards());
        this.searchPhaseController = searchPhaseController;
        this.dfsSearchResults = dfsSearchResults;
        this.nextPhaseFactory = nextPhaseFactory;
@@ -64,7 +65,8 @@ final class DfsQueryPhase extends SearchPhase {
        // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs
        // to free up memory early
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults);
        final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult, dfsSearchResults.asList().size(),
        final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult::consumeResult,
            dfsSearchResults.asList().size(),
            () -> {
                context.executeNextPhase(this, nextPhaseFactory.apply(queryResult));
            }, context);
@@ -49,29 +49,31 @@ final class FetchSearchPhase extends SearchPhase {
    private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final Logger logger;
    private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer;

    FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context) {
        this(queryResults, searchPhaseController, context,
        this(resultConsumer, searchPhaseController, context,
            (response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits
                (finalResponse) -> sendResponsePhase(finalResponse, context)));
    }

    FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context, Function<SearchResponse, SearchPhase> nextPhaseFactory) {
        super("fetch");
        if (context.getNumShards() != queryResults.length()) {
        if (context.getNumShards() != resultConsumer.getNumShards()) {
            throw new IllegalStateException("number of shards must match the length of the query results but doesn't:"
                + context.getNumShards() + "!=" + queryResults.length());
                + context.getNumShards() + "!=" + resultConsumer.getNumShards());
        }
        this.fetchResults = new AtomicArray<>(queryResults.length());
        this.fetchResults = new AtomicArray<>(resultConsumer.getNumShards());
        this.searchPhaseController = searchPhaseController;
        this.queryResults = queryResults;
        this.queryResults = resultConsumer.results;
        this.nextPhaseFactory = nextPhaseFactory;
        this.context = context;
        this.logger = context.getLogger();
        this.resultConsumer = resultConsumer;
    }

@@ -99,7 +101,7 @@ final class FetchSearchPhase extends SearchPhase {
        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults);
        String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null;
        List<AtomicArray.Entry<QuerySearchResultProvider>> queryResultsAsList = queryResults.asList();
        final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResultsAsList);
        final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce();
        final boolean queryAndFetchOptimization = queryResults.length() == 1;
        final Runnable finishPhase = ()
            -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
@@ -119,7 +121,7 @@ final class FetchSearchPhase extends SearchPhase {
        final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
            searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards)
            : null;
        final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults,
        final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults::set,
            docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
            finishPhase, context);
        for (int i = 0; i < docIdsToLoad.length; i++) {
@ -28,12 +28,14 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
|||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator}
|
||||
|
@ -213,4 +215,53 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
 * @param listener the listener to notify on response
 */
protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener<FirstResult> listener);

/**
 * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing
 */
static class SearchPhaseResults<Result extends SearchPhaseResult> {
final AtomicArray<Result> results;

SearchPhaseResults(int size) {
results = new AtomicArray<>(size);
}

/**
 * Returns the number of expected results this class should collect
 */
final int getNumShards() {
return results.length();
}

/**
 * A stream of all non-null (successful) shard results
 */
final Stream<Result> getSuccessfulResults() {
return results.asList().stream().map(e -> e.value);
}

/**
 * Consumes a single shard result
 * @param shardIndex the shard's index; this is a 0-based id used to establish a 1:1 mapping to the searched shards
 * @param result the shard's result
 */
void consumeResult(int shardIndex, Result result) {
assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set";
results.set(shardIndex, result);
}

/**
 * Returns <code>true</code> iff a result is present for the given shard ID.
 */
final boolean hasResult(int shardIndex) {
return results.get(shardIndex) != null;
}

/**
 * Reduces the collected results
 */
SearchPhaseController.ReducedQueryPhase reduce() {
throw new UnsupportedOperationException("reduce is not supported");
}
}
}
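
The base class above is deliberately minimal: a subclass only has to override consumeResult (and, where it can, reduce) to add on-the-fly processing. A minimal illustrative sketch, not part of the change (the CountingPhaseResults name is hypothetical; AtomicInteger is already imported in this file):

// Hypothetical subclass: count consumed shard results while delegating the
// actual collection to the base class.
static class CountingPhaseResults<Result extends SearchPhaseResult> extends SearchPhaseResults<Result> {
    private final AtomicInteger consumed = new AtomicInteger();

    CountingPhaseResults(int size) {
        super(size);
    }

    @Override
    void consumeResult(int shardIndex, Result result) {
        super.consumeResult(shardIndex, result); // store into the AtomicArray as usual
        consumed.incrementAndGet();              // extra on-the-fly bookkeeping
    }

    int getConsumed() {
        return consumed.get();
    }
}

The base reduce() deliberately throws UnsupportedOperationException; the QueryPhaseResultConsumer added later in this commit is the real reducing implementation.
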
@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
@ -43,7 +42,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}

@ -54,8 +53,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
}

@Override
protected SearchPhase getNextPhase(AtomicArray<DfsSearchResult> results, SearchPhaseContext context) {
return new DfsQueryPhase(results, searchPhaseController,
protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) {
return new DfsQueryPhase(results.results, searchPhaseController,
(queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context);
}
}
@ -114,4 +114,5 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
 * a response is returned to the user indicating that all shards have failed.
 */
void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase);

}
@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
@ -70,14 +71,6 @@ import java.util.stream.StreamSupport;

public class SearchPhaseController extends AbstractComponent {

private static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = (o1, o2) -> {
int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex());
if (i == 0) {
i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id();
}
return i;
};

private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];

private final BigArrays bigArrays;
@ -149,6 +142,9 @@ public class SearchPhaseController extends AbstractComponent {
 * named completion suggestion across all shards. If more than one named completion suggestion is specified in the
 * request, the suggest docs for a named suggestion are ordered by the suggestion name.
 *
 * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate
 * the result. In order to obtain stable results the shard index (index of the result in the result array) must be the same.
 *
 * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
 *                   Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
 * @param resultsArr Shard result holder
@ -159,26 +155,31 @@ public class SearchPhaseController extends AbstractComponent {
return EMPTY_DOCS;
}

final QuerySearchResult result;
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
boolean hasResult = false;
QuerySearchResult resultToOptimize = null;
// lets see if we only got hits from a single shard, if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().hasHits()) {
if (result != null) { // we already have one, can't really optimize
if (hasResult) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
hasResult = true;
resultToOptimize = entry.value.queryResult();
shardIndex = entry.index;
}
}
result = canOptimize ? resultToOptimize : results.get(0).value.queryResult();
assert result != null;
}
if (canOptimize) {
int offset = result.from();
@ -224,74 +225,62 @@ public class SearchPhaseController extends AbstractComponent {
return docs;
}

@SuppressWarnings("unchecked")
AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]);
Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
QuerySearchResultProvider firstResult = sortedResults[0].value;

int topN = firstResult.queryResult().size();
int from = firstResult.queryResult().from();
if (ignoreFrom) {
from = 0;
}
final int topN = result.queryResult().size();
final int from = ignoreFrom ? 0 : result.queryResult().from();

final TopDocs mergedTopDocs;
int numShards = resultsArr.length();
if (firstResult.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) firstResult.queryResult().topDocs();
final int numShards = resultsArr.length();
if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);

final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN);
}
}
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
} else if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
} else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);

final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
}
}
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
} else {
final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = Lucene.EMPTY_TOP_DOCS;
}
}
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
}

ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
// group suggestions and assign shard index
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
Suggest shardSuggest = sortedResult.value.queryResult().suggest();
if (shardSuggest != null) {
for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
@ -461,23 +450,54 @@ public class SearchPhaseController extends AbstractComponent {

/**
 * Reduces the given query results and consumes all aggregations and profile results.
 * @param queryResults a list of non-null query shard results
 */
public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
return reducedQueryPhase(queryResults, null, 0);
}

/**
 * Reduces the given query results and consumes all aggregations and profile results.
 * @param queryResults a list of non-null query shard results
 * @param bufferdAggs a list of pre-collected / buffered aggregations. If this list is non-null, all aggregations have been consumed
 *                    from all non-null query results.
 * @param numReducePhases the number of non-final reduce phases applied to the query results.
 * @see QuerySearchResult#consumeAggs()
 * @see QuerySearchResult#consumeProfileResult()
 */
public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
private ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults,
List<InternalAggregations> bufferdAggs, int numReducePhases) {
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
numReducePhases++; // increment for this phase
long totalHits = 0;
long fetchHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
boolean timedOut = false;
Boolean terminatedEarly = null;
if (queryResults.isEmpty()) {
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null);
if (queryResults.isEmpty()) { // early terminate we have nothing to reduce
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null,
numReducePhases);
}
QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
final QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
final boolean hasSuggest = firstResult.suggest() != null;
final boolean hasAggs = firstResult.hasAggs();
final boolean hasProfileResults = firstResult.hasProfileResults();
final List<InternalAggregations> aggregationsList = hasAggs ? new ArrayList<>(queryResults.size()) : Collections.emptyList();
final boolean consumeAggs;
final List<InternalAggregations> aggregationsList;
if (bufferdAggs != null) {
consumeAggs = false;
// we already have results from intermediate reduces and just need to perform the final reduce
assert firstResult.hasAggs() : "firstResult has no aggs but we got non null buffered aggs?";
aggregationsList = bufferdAggs;
} else if (firstResult.hasAggs()) {
// the number of shards was less than the buffer size so we reduce agg results directly
aggregationsList = new ArrayList<>(queryResults.size());
consumeAggs = true;
} else {
// no aggregations
aggregationsList = Collections.emptyList();
consumeAggs = false;
}

// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size())
@ -506,7 +526,7 @@ public class SearchPhaseController extends AbstractComponent {
suggestionList.add(suggestion);
}
}
if (hasAggs) {
if (consumeAggs) {
aggregationsList.add((InternalAggregations) result.consumeAggs());
}
if (hasProfileResults) {
|
|||
}
|
||||
}
|
||||
final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
|
||||
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true);
|
||||
final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
|
||||
firstResult.pipelineAggregators());
|
||||
firstResult.pipelineAggregators(), reduceContext);
|
||||
final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
|
||||
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, firstResult, suggest, aggregations,
|
||||
shardResults);
|
||||
shardResults, numReducePhases);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Performs an intermediate reduce phase on the aggregations. For instance with this reduce phase never prune information
|
||||
* that relevant for the final reduce step. For final reduce see {@link #reduceAggs(List, List, ReduceContext)}
|
||||
*/
|
||||
private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) {
|
||||
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false);
|
||||
return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
|
||||
null, reduceContext);
|
||||
}
|
||||
|
||||
private InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList,
|
||||
List<SiblingPipelineAggregator> pipelineAggregators) {
|
||||
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService);
|
||||
List<SiblingPipelineAggregator> pipelineAggregators, ReduceContext reduceContext) {
|
||||
InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
|
||||
if (pipelineAggregators != null) {
|
||||
List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false)
|
||||
|
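
The boolean on ReduceContext distinguishes intermediate from final reduces because only associative, lossless combine steps are safe to apply early. A self-contained sketch of the distinction, with plain longs standing in for aggregations (all names hypothetical):

import java.util.Arrays;
import java.util.List;

public class PartialReduceDemo {
    // summing is associative, so partial results can be combined incrementally
    static long reduce(List<Long> partials) {
        return partials.stream().mapToLong(Long::longValue).sum();
    }

    public static void main(String[] args) {
        long incremental = reduce(Arrays.asList(
                reduce(Arrays.asList(1L, 2L)),   // intermediate reduce of shards 0-1
                reduce(Arrays.asList(3L, 4L)))); // intermediate reduce of shards 2-3
        long direct = reduce(Arrays.asList(1L, 2L, 3L, 4L)); // single final reduce
        System.out.println(incremental == direct); // true
    }
}

A step like "keep only the global top N", by contrast, may only run in the final reduce, which is why reduceAggsIncrementally passes false for the final-reduce flag.
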
@ -558,10 +589,15 @@ public class SearchPhaseController extends AbstractComponent {
final InternalAggregations aggregations;
// the reduced profile results
final SearchProfileShardResults shardResults;
// the number of reduce phases
final int numReducePhases;

ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly,
QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations,
SearchProfileShardResults shardResults) {
SearchProfileShardResults shardResults, int numReducePhases) {
if (numReducePhases <= 0) {
throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases);
}
this.totalHits = totalHits;
this.fetchHits = fetchHits;
if (Float.isInfinite(maxScore)) {
@ -575,6 +611,7 @@ public class SearchPhaseController extends AbstractComponent {
this.suggest = suggest;
this.aggregations = aggregations;
this.shardResults = shardResults;
this.numReducePhases = numReducePhases;
}

/**
@ -582,7 +619,7 @@ public class SearchPhaseController extends AbstractComponent {
 * @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray)
 */
public InternalSearchResponse buildResponse(SearchHits hits) {
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases);
}

/**
@ -593,4 +630,95 @@ public class SearchPhaseController extends AbstractComponent {
}
}

/**
 * A {@link org.elasticsearch.action.search.InitialSearchPhase.SearchPhaseResults} implementation
 * that incrementally reduces aggregation results as shard results are consumed.
 * This implementation can be configured to batch up a certain number of results and only reduce them
 * iff the buffer is exhausted.
 */
static final class QueryPhaseResultConsumer
extends InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> {
private final InternalAggregations[] buffer;
private int index;
private final SearchPhaseController controller;
private int numReducePhases = 0;

/**
 * Creates a new {@link QueryPhaseResultConsumer}
 * @param controller a controller instance to reduce the query response objects
 * @param expectedResultSize the expected number of query results. Corresponds to the number of shards queried
 * @param bufferSize the size of the reduce buffer. if the buffer size is smaller than the number of expected results
 *                   the buffer is used to incrementally reduce aggregation results before all shards responded.
 */
private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize) {
super(expectedResultSize);
if (expectedResultSize != 1 && bufferSize < 2) {
throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result");
}
if (expectedResultSize <= bufferSize) {
throw new IllegalArgumentException("buffer size must be less than the expected result size");
}
this.controller = controller;
// no need to buffer anything if we have less expected results. in this case we don't consume any results ahead of time.
this.buffer = new InternalAggregations[bufferSize];
}

@Override
public void consumeResult(int shardIndex, QuerySearchResultProvider result) {
super.consumeResult(shardIndex, result);
QuerySearchResult queryResult = result.queryResult();
assert queryResult.hasAggs() : "this collector should only be used if aggs are requested";
consumeInternal(queryResult);
}

private synchronized void consumeInternal(QuerySearchResult querySearchResult) {
InternalAggregations aggregations = (InternalAggregations) querySearchResult.consumeAggs();
if (index == buffer.length) {
InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(buffer));
Arrays.fill(buffer, null);
numReducePhases++;
buffer[0] = reducedAggs;
index = 1;
}
final int i = index++;
buffer[i] = aggregations;
}

private synchronized List<InternalAggregations> getRemaining() {
return Arrays.asList(buffer).subList(0, index);
}

@Override
public ReducedQueryPhase reduce() {
return controller.reducedQueryPhase(results.asList(), getRemaining(), numReducePhases);
}

/**
 * Returns the number of buffered results
 */
int getNumBuffered() {
return index;
}

int getNumReducePhases() { return numReducePhases; }
}

/**
 * Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
 */
InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> newSearchPhaseResults(SearchRequest request, int numShards) {
SearchSourceBuilder source = request.source();
if (source != null && source.aggregations() != null) {
if (request.getBatchedReduceSize() < numShards) {
// only use this if there are aggs and if there are more shards than we should reduce at once
return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize());
}
}
return new InitialSearchPhase.SearchPhaseResults(numShards) {
@Override
public ReducedQueryPhase reduce() {
return reducedQueryPhase(results.asList());
}
};
}
}
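
The buffering scheme above is easiest to see with the aggregation machinery stripped away. A minimal, self-contained sketch of the same pattern (names hypothetical; longs and addition stand in for InternalAggregations and the aggregation reduce): fill a fixed buffer, and when it is full, collapse it into slot 0 with an intermediate reduce.

import java.util.Arrays;

// Hypothetical stand-in for QueryPhaseResultConsumer's buffering logic.
public class BatchedReducer {
    private final long[] buffer;
    private int index;
    private int numReducePhases;

    BatchedReducer(int bufferSize) {
        this.buffer = new long[bufferSize];
    }

    synchronized void consume(long value) {
        if (index == buffer.length) {
            long reduced = Arrays.stream(buffer).sum(); // intermediate reduce
            Arrays.fill(buffer, 0L);
            numReducePhases++;
            buffer[0] = reduced; // collapsed into the first slot
            index = 1;
        }
        buffer[index++] = value;
    }

    synchronized long reduce() { // final reduce over whatever is buffered
        numReducePhases++;
        return Arrays.stream(buffer, 0, index).sum();
    }

    public static void main(String[] args) {
        BatchedReducer r = new BatchedReducer(2);
        for (long v : new long[] {1, 2, 3, 4, 5}) {
            r.consume(v);
        }
        System.out.println(r.reduce()); // 15, regardless of buffer size
    }
}

For example, with 20 expected shard results and a buffer of 5, the buffer collapses on the 6th, 10th, 14th and 18th result (4 intermediate reduces); the final reducedQueryPhase call increments the counter once more, so the response reports num_reduce_phases = 5.
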
@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;
@ -44,17 +43,19 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<Qu
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
request, listener, shardsIts, startTime, clusterStateVersion, task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}

protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
getSearchTransport().sendExecuteQuery(getConnection(shard.currentNodeId()),
buildShardSearchRequest(shardIt, shard), getTask(), listener);
}

@Override
protected SearchPhase getNextPhase(AtomicArray<QuerySearchResultProvider> results, SearchPhaseContext context) {
protected SearchPhase getNextPhase(SearchPhaseResults<QuerySearchResultProvider> results, SearchPhaseContext context) {
return new FetchSearchPhase(results, searchPhaseController, context);
}
}
@ -70,6 +70,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest

private Scroll scroll;

private int batchedReduceSize = 512;

private String[] types = Strings.EMPTY_ARRAY;

public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed();
@ -274,6 +276,25 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
return this.requestCache;
}

/**
 * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection
 * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
 */
public void setBatchedReduceSize(int batchedReduceSize) {
if (batchedReduceSize <= 1) {
throw new IllegalArgumentException("batchedReduceSize must be >= 2");
}
this.batchedReduceSize = batchedReduceSize;
}

/**
 * Returns the number of shard results that should be reduced at once on the coordinating node. This value should be used as a
 * protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
 */
public int getBatchedReduceSize() {
return batchedReduceSize;
}

/**
 * @return true if the request only has suggest
 */
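
For callers, the new knob is set on the request before execution. A hedged usage sketch (the index pattern is made up):

import org.elasticsearch.action.search.SearchRequest;

public class BatchedReduceSizeExample {
    public static void main(String[] args) {
        // Cap the coordinating node's per-batch reduce at 64 shard results
        // instead of the default 512.
        SearchRequest request = new SearchRequest("logs-*");
        request.setBatchedReduceSize(64);
        System.out.println(request.getBatchedReduceSize()); // 64
        // request.setBatchedReduceSize(1) would throw IllegalArgumentException
    }
}
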
@ -320,6 +341,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
requestCache = in.readOptionalBoolean();
batchedReduceSize = in.readVInt();
}

@Override
@ -337,6 +359,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
out.writeOptionalBoolean(requestCache);
out.writeVInt(batchedReduceSize);
}

@Override
@ -523,4 +523,13 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
return request.source();
}

/**
 * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection
 * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.
 */
public SearchRequestBuilder setBatchedReduceSize(int batchedReduceSize) {
this.request.setBatchedReduceSize(batchedReduceSize);
return this;
}
}
@ -61,7 +61,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
public SearchResponse() {
}

public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, long tookInMillis, ShardSearchFailure[] shardFailures) {
public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards,
long tookInMillis, ShardSearchFailure[] shardFailures) {
this.internalResponse = internalResponse;
this.scrollId = scrollId;
this.totalShards = totalShards;
@ -106,6 +107,13 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
return internalResponse.terminatedEarly();
}

/**
 * Returns the number of reduce phases applied to obtain this search response
 */
public int getNumReducePhases() {
return internalResponse.getNumReducePhases();
}

/**
 * How long the search took.
 */
@ -172,13 +180,6 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
return internalResponse.profile();
}

static final class Fields {
static final String _SCROLL_ID = "_scroll_id";
static final String TOOK = "took";
static final String TIMED_OUT = "timed_out";
static final String TERMINATED_EARLY = "terminated_early";
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@ -189,14 +190,18 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb

public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
if (scrollId != null) {
builder.field(Fields._SCROLL_ID, scrollId);
builder.field("_scroll_id", scrollId);
}
builder.field(Fields.TOOK, tookInMillis);
builder.field(Fields.TIMED_OUT, isTimedOut());
builder.field("took", tookInMillis);
builder.field("timed_out", isTimedOut());
if (isTerminatedEarly() != null) {
builder.field(Fields.TERMINATED_EARLY, isTerminatedEarly());
builder.field("terminated_early", isTerminatedEarly());
}
RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(), getShardFailures());
if (getNumReducePhases() != 1) {
builder.field("num_reduce_phases", getNumReducePhases());
}
RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(),
getShardFailures());
internalResponse.toXContent(builder, params);
return builder;
}
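
Because the field is only emitted when getNumReducePhases() != 1, existing single-phase responses render byte-for-byte unchanged. A hedged usage fragment (the client variable and index pattern are assumptions, not part of this change):

// Fragment, assuming an org.elasticsearch.client.Client instance named client.
SearchResponse response = client.search(new SearchRequest("logs-*")).actionGet();
if (response.getNumReducePhases() > 1) {
    // the coordinating node performed intermediate (batched) reduces
}
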
@ -19,14 +19,12 @@

package org.elasticsearch.action.support.replication;

import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
@ -38,8 +36,6 @@ import java.io.IOException;
public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>> extends ReplicationRequest<R> implements WriteRequest<R> {
private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;

private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;

/**
 * Constructor for deserialization.
 */
@ -66,32 +62,11 @@ public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
refreshPolicy = RefreshPolicy.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNo = in.readZLong();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
refreshPolicy.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeZLong(seqNo);
}
}

/**
 * Returns the sequence number for this operation. The sequence number is assigned while the operation
 * is performed on the primary shard.
 */
public long getSeqNo() {
return seqNo;
}

/** sets the sequence number for this operation. should only be called on the primary shard */
public void setSeqNo(long seqNo) {
this.seqNo = seqNo;
}
}
@ -38,7 +38,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
@ -74,7 +73,6 @@ public class ReplicationResponse extends ActionResponse {

public static class ShardInfo implements Streamable, ToXContentObject {

private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
@ -134,25 +132,6 @@ public class ReplicationResponse extends ActionResponse {
return status;
}

@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
ShardInfo other = (ShardInfo) that;
return Objects.equals(total, other.total) &&
Objects.equals(successful, other.successful) &&
Arrays.equals(failures, other.failures);
}

@Override
public int hashCode() {
return Objects.hash(total, successful, failures);
}

@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readVInt();
@ -327,27 +306,6 @@ public class ReplicationResponse extends ActionResponse {
return primary;
}

@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
Failure failure = (Failure) that;
return Objects.equals(primary, failure.primary) &&
Objects.equals(shardId, failure.shardId) &&
Objects.equals(nodeId, failure.nodeId) &&
Objects.equals(cause, failure.cause) &&
Objects.equals(status, failure.status);
}

@Override
public int hashCode() {
return Objects.hash(shardId, nodeId, cause, status, primary);
}

@Override
public void readFrom(StreamInput in) throws IOException {
shardId = ShardId.readShardId(in);
|
|
@ -171,7 +171,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
|
|||
final ShardId shardId = request.getShardId();
|
||||
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
final IndexShard indexShard = indexService.getShard(shardId.getId());
|
||||
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::estimatedTimeInMillis);
|
||||
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis);
|
||||
switch (result.getResponseResult()) {
|
||||
case CREATED:
|
||||
IndexRequest upsertRequest = result.action();
|
||||
|
|
|
@ -30,8 +30,11 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@ -49,7 +52,7 @@ import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;

public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocWriteRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
implements DocWriteRequest<UpdateRequest>, WriteRequest<UpdateRequest>, ToXContentObject {

private String type;
private String id;
@ -553,16 +556,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #doc(String, XContentType)}
 */
@Deprecated
public UpdateRequest doc(String source) {
safeDoc().source(source);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -571,16 +564,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #doc(byte[], XContentType)}
 */
@Deprecated
public UpdateRequest doc(byte[] source) {
safeDoc().source(source);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -589,16 +572,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #doc(byte[], int, int, XContentType)}
 */
@Deprecated
public UpdateRequest doc(byte[] source, int offset, int length) {
safeDoc().source(source, offset, length);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -669,16 +642,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #upsert(String, XContentType)}
 */
@Deprecated
public UpdateRequest upsert(String source) {
safeUpsertRequest().source(source);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -687,16 +650,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #upsert(byte[], XContentType)}
 */
@Deprecated
public UpdateRequest upsert(byte[] source) {
safeUpsertRequest().source(source);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -705,16 +658,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #upsert(byte[], int, int, XContentType)}
 */
@Deprecated
public UpdateRequest upsert(byte[] source, int offset, int length) {
safeUpsertRequest().source(source, offset, length);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -906,4 +849,42 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
out.writeBoolean(scriptedUpsert);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (docAsUpsert) {
builder.field("doc_as_upsert", docAsUpsert);
}
if (doc != null) {
XContentType xContentType = doc.getContentType();
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, doc.source(), xContentType)) {
builder.field("doc");
builder.copyCurrentStructure(parser);
}
}
if (script != null) {
builder.field("script", script);
}
if (upsertRequest != null) {
XContentType xContentType = upsertRequest.getContentType();
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, upsertRequest.source(), xContentType)) {
builder.field("upsert");
builder.copyCurrentStructure(parser);
}
}
if (scriptedUpsert) {
builder.field("scripted_upsert", scriptedUpsert);
}
if (detectNoop == false) {
builder.field("detect_noop", detectNoop);
}
if (fields != null) {
builder.array("fields", fields);
}
if (fetchSourceContext != null) {
builder.field("_source", fetchSourceContext);
}
builder.endObject();
return builder;
}
}
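
A hedged fragment showing the new ToXContentObject support in action (the index, type, id and doc body are made up; XContentHelper.toXContent is the overload touched later in this commit):

// Fragment; throws IOException.
UpdateRequest updateRequest = new UpdateRequest("test", "type1", "1")
        .doc("{\"field\":\"value\"}", XContentType.JSON)
        .detectNoop(false);
BytesReference bytes = XContentHelper.toXContent(updateRequest, XContentType.JSON, false);
// renders roughly: {"doc":{"field":"value"},"detect_noop":false}
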
@ -221,16 +221,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #setDoc(String, XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setDoc(String source) {
request.doc(source);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -239,16 +229,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #setDoc(byte[], XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setDoc(byte[] source) {
request.doc(source);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -257,16 +237,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 * @deprecated use {@link #setDoc(byte[], int, int, XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setDoc(byte[] source, int offset, int length) {
request.doc(source, offset, length);
return this;
}

/**
 * Sets the doc to use for updates when a script is not specified.
 */
@ -326,16 +296,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #setUpsert(String, XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setUpsert(String source) {
request.upsert(source);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -344,16 +304,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #setUpsert(byte[], XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setUpsert(byte[] source) {
request.upsert(source);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -362,16 +312,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 * @deprecated use {@link #setUpsert(byte[], int, int, XContentType)}
 */
@Deprecated
public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length) {
request.upsert(source, offset, length);
return this;
}

/**
 * Sets the doc source of the update request to be used when the document does not exist.
 */
@ -20,21 +20,18 @@
package org.elasticsearch.action.update;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.function.BiConsumer;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

public class UpdateResponse extends DocWriteResponse {

@ -114,44 +111,59 @@ public class UpdateResponse extends DocWriteResponse {
return builder.append("]").toString();
}

private static final ConstructingObjectParser<UpdateResponse, Void> PARSER;
static {
PARSER = new ConstructingObjectParser<>(UpdateResponse.class.getName(),
args -> {
// index uuid and shard id are unknown and can't be parsed back for now.
String index = (String) args[0];
ShardId shardId = new ShardId(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), -1);
String type = (String) args[1];
String id = (String) args[2];
long version = (long) args[3];
ShardInfo shardInfo = (ShardInfo) args[5];
Long seqNo = (Long) args[6];
public static UpdateResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);

Result result = null;
for (Result r : Result.values()) {
if (r.getLowercase().equals(args[4])) {
result = r;
break;
}
}

UpdateResponse updateResponse = null;
if (shardInfo != null && seqNo != null) {
updateResponse = new UpdateResponse(shardInfo, shardId, type, id, seqNo, version, result);
} else {
updateResponse = new UpdateResponse(shardId, type, id, version, result);
}
return updateResponse;
});

DocWriteResponse.declareParserFields(PARSER);
BiConsumer<UpdateResponse, GetResult> setGetResult = (update, get) ->
update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), update.getVersion(),
get.isExists(), get.internalSourceRef(), get.getFields()));
PARSER.declareObject(setGetResult, (parser, context) -> GetResult.fromXContentEmbedded(parser), new ParseField(GET));
Builder context = new Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
parseXContentFields(parser, context);
}
return context.build();
}

public static UpdateResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
/**
 * Parse the current token and update the parsing context appropriately.
 */
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();

if (GET.equals(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
context.setGetResult(GetResult.fromXContentEmbedded(parser));
}
} else {
DocWriteResponse.parseInnerToXContent(parser, context);
}
}

/**
 * Builder class for {@link UpdateResponse}. This builder is usually used during xcontent parsing to
 * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
 * instantiate the {@link UpdateResponse}.
 */
public static class Builder extends DocWriteResponse.Builder {

private GetResult getResult = null;

public void setGetResult(GetResult getResult) {
this.getResult = getResult;
}

@Override
public UpdateResponse build() {
UpdateResponse update;
if (shardInfo != null && seqNo != null) {
update = new UpdateResponse(shardInfo, shardId, type, id, seqNo, version, result);
} else {
update = new UpdateResponse(shardId, type, id, version, result);
}
if (getResult != null) {
update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), update.getVersion(),
getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()));
}
update.setForcedRefresh(forcedRefresh);
return update;
}
}
}
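
A hedged round-trip fragment for the new builder-based parser (the JSON body is made up, and the createParser signature as of this branch is an assumption):

// Fragment; throws IOException.
String json = "{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"1\",\"_version\":2,"
        + "\"result\":\"updated\",\"_shards\":{\"total\":1,\"successful\":1,\"failed\":0}}";
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
        .createParser(NamedXContentRegistry.EMPTY, json)) {
    UpdateResponse parsed = UpdateResponse.fromXContent(parser);
    assert parsed.getVersion() == 2;
}
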
@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
@ -68,7 +69,7 @@ public class MappingUpdatedAction extends AbstractComponent {
if (type.equals(MapperService.DEFAULT_MAPPING)) {
throw new IllegalArgumentException("_default_ mapping should not be updated");
}
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString())
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString(), XContentType.JSON)
.setMasterNodeTimeout(timeout).setTimeout(timeout);
}

@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

@ -712,10 +711,12 @@ public class Strings {
 * @return the delimited String
 */
public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix) {
return collectionToDelimitedString(coll, delim, prefix, suffix, new StringBuilder());
StringBuilder sb = new StringBuilder();
collectionToDelimitedString(coll, delim, prefix, suffix, sb);
return sb.toString();
}

public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) {
public static void collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) {
Iterator<?> it = coll.iterator();
while (it.hasNext()) {
sb.append(prefix).append(it.next()).append(suffix);
@ -723,7 +724,6 @@ public class Strings {
sb.append(delim);
}
}
return sb.toString();
}

/**
@ -758,12 +758,14 @@ public class Strings {
 * @return the delimited String
 */
public static String arrayToDelimitedString(Object[] arr, String delim) {
return arrayToDelimitedString(arr, delim, new StringBuilder());
StringBuilder sb = new StringBuilder();
arrayToDelimitedString(arr, delim, sb);
return sb.toString();
}

public static String arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) {
public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) {
if (isEmpty(arr)) {
return "";
return;
}
for (int i = 0; i < arr.length; i++) {
if (i > 0) {
@ -771,7 +773,6 @@ public class Strings {
}
sb.append(arr[i]);
}
return sb.toString();
}

/**
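
The signature change makes the StringBuilder variants pure append-style helpers; only the convenience overloads materialize a String. A hedged usage fragment:

// Fragment.
StringBuilder sb = new StringBuilder("values: ");
Strings.arrayToDelimitedString(new Object[] {"a", "b", "c"}, ",", sb); // appends in place, returns void
System.out.println(sb); // values: a,b,c
System.out.println(Strings.collectionToDelimitedString(Arrays.asList(1, 2), "|", "[", "]")); // [1]|[2]
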
@ -63,7 +63,7 @@ public class BlobPath implements Iterable<String> {

public String buildAsString() {
String p = String.join(SEPARATOR, paths);
if (p.isEmpty()) {
if (p.isEmpty() || p.endsWith(SEPARATOR)) {
return p;
}
return p + SEPARATOR;
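
The added endsWith check pins down the contract: exactly one trailing separator, and paths that already carry one (or the empty root path) are returned untouched. A behavioral sketch, assuming SEPARATOR is "/":

// ""      -> ""        (empty root path stays empty)
// "a/b/"  -> "a/b/"    (already terminated, returned untouched)
// "a/b"   -> "a/b/"    (exactly one separator appended)
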
@ -448,12 +448,20 @@ public class XContentHelper {
 * {@link XContentType}. Wraps the output into a new anonymous object.
 */
public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean humanReadable) throws IOException {
return toXContent(toXContent, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
}

/**
 * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided
 * {@link XContentType}. Wraps the output into a new anonymous object.
 */
public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException {
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
builder.humanReadable(humanReadable);
if (toXContent.isFragment()) {
builder.startObject();
}
toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
toXContent.toXContent(builder, params);
if (toXContent.isFragment()) {
builder.endObject();
}
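
A hedged fragment for the new params-aware overload (the serialized object can be any ToXContent, e.g. the UpdateRequest shown earlier in this commit; the param name is just an example):

// Fragment; throws IOException.
ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"));
BytesReference bytes = XContentHelper.toXContent(updateRequest, XContentType.JSON, params, false);
// passing ToXContent.EMPTY_PARAMS is equivalent to the overload without params
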
@ -39,7 +39,6 @@ import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class MembershipAction extends AbstractComponent {

@ -63,8 +62,7 @@ public class MembershipAction extends AbstractComponent {

private final MembershipListener listener;

public MembershipAction(Settings settings, TransportService transportService,
Supplier<DiscoveryNode> localNodeSupplier, MembershipListener listener) {
public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener) {
super(settings);
this.transportService = transportService;
this.listener = listener;
@ -73,7 +71,7 @@ public class MembershipAction extends AbstractComponent {
transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new,
ThreadPool.Names.GENERIC, new JoinRequestRequestHandler());
transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
() -> new ValidateJoinRequest(localNodeSupplier), ThreadPool.Names.GENERIC,
() -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC,
new ValidateJoinRequestRequestHandler());
transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new,
ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
@ -155,22 +153,18 @@ public class MembershipAction extends AbstractComponent {
|
|||
}
|
||||
|
||||
static class ValidateJoinRequest extends TransportRequest {
|
||||
private final Supplier<DiscoveryNode> localNode;
|
||||
private ClusterState state;
|
||||
|
||||
ValidateJoinRequest(Supplier<DiscoveryNode> localNode) {
|
||||
this.localNode = localNode;
|
||||
}
|
||||
ValidateJoinRequest() {}
|
||||
|
||||
ValidateJoinRequest(ClusterState state) {
|
||||
this.state = state;
|
||||
this.localNode = state.nodes()::getLocalNode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
this.state = ClusterState.readFrom(in, localNode.get());
|
||||
this.state = ClusterState.readFrom(in, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
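After this refactor ValidateJoinRequest no longer needs a local-node supplier at construction time: the transport layer instantiates an empty request and populates it via readFrom, and ClusterState.readFrom now tolerates a null local node. A generic sketch of that instantiate-then-deserialize protocol (hypothetical types, not the Elasticsearch transport classes):

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.function.Supplier;

    // Hypothetical minimal version of the pattern: a request type must be
    // constructible with no arguments so a supplier can create an empty
    // instance, which the wire protocol then fills in.
    interface WireReadable {
        void readFrom(DataInputStream in) throws IOException;
    }

    class PingRequest implements WireReadable {
        private String payload; // populated during deserialization, not construction

        PingRequest() {} // the no-arg constructor the supplier relies on

        @Override
        public void readFrom(DataInputStream in) throws IOException {
            this.payload = in.readUTF();
        }
    }

    class Transport {
        static <T extends WireReadable> T receive(Supplier<T> factory,
                                                  DataInputStream in) throws IOException {
            T request = factory.get(); // e.g. () -> new PingRequest()
            request.readFrom(in);      // state arrives from the stream
            return request;
        }
    }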
@@ -191,7 +191,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
                 new NewPendingClusterStateListener(),
                 discoverySettings,
                 clusterService.getClusterName());
-        this.membership = new MembershipAction(settings, transportService, this::localNode, new MembershipListener());
+        this.membership = new MembershipAction(settings, transportService, new MembershipListener());
         this.joinThreadControl = new JoinThreadControl();

         transportService.registerRequestHandler(
@@ -19,6 +19,7 @@

 package org.elasticsearch.http;

+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.transport.PortsRange;
@@ -69,7 +70,14 @@ public final class HttpTransportSettings {
     public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED =
         Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope);
     public static final Setting<Boolean> SETTING_HTTP_CONTENT_TYPE_REQUIRED =
-        Setting.boolSetting("http.content_type.required", false, Property.NodeScope);
+        new Setting<>("http.content_type.required", (s) -> Boolean.toString(true), (s) -> {
+            final boolean value = Booleans.parseBoolean(s);
+            if (value == false) {
+                throw new IllegalArgumentException("http.content_type.required cannot be set to false. It exists only to make a rolling" +
+                    " upgrade easier");
+            }
+            return true;
+        }, Property.NodeScope, Property.Deprecated);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
         Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =
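The replacement setting rejects false at parse time rather than silently accepting it; per its own error message, the deprecated key survives only so an existing http.content_type.required: true does not break a rolling upgrade. A standalone sketch of the same parse-and-validate idea (hypothetical names; uses Boolean.parseBoolean where Elasticsearch's Booleans.parseBoolean is stricter):

    import java.util.function.Function;

    public class ValidatingSettingExample {
        // Like the setting above: accept only "true" and fail fast on
        // "false" with an explanatory message.
        static final Function<String, Boolean> CONTENT_TYPE_REQUIRED = s -> {
            boolean value = Boolean.parseBoolean(s);
            if (value == false) {
                throw new IllegalArgumentException(
                    "http.content_type.required cannot be set to false. " +
                    "It exists only to make a rolling upgrade easier");
            }
            return true;
        };

        public static void main(String[] args) {
            System.out.println(CONTENT_TYPE_REQUIRED.apply("true"));  // true
            CONTENT_TYPE_REQUIRED.apply("false");                     // throws
        }
    }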
@@ -391,6 +391,14 @@ public abstract class Engine implements Closeable {
             this.created = created;
         }

+        /**
+         * use in case of index operation failed before getting to internal engine
+         * (e.g while preparing operation or updating mappings)
+         * */
+        public IndexResult(Exception failure, long version) {
+            this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO);
+        }
+
         public IndexResult(Exception failure, long version, long seqNo) {
             super(Operation.TYPE.INDEX, failure, version, seqNo);
             this.created = false;
@@ -188,7 +188,7 @@ public final class EngineConfig {

     /**
      * Returns a thread-pool mainly used to get estimated time stamps from
-     * {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
+     * {@link org.elasticsearch.threadpool.ThreadPool#relativeTimeInMillis()} and to schedule
      * async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool
      */
     public ThreadPool getThreadPool() {
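The rename from estimatedTimeInMillis to relativeTimeInMillis, applied throughout InternalEngine below, signals that the value is a cheap, cached, monotonic reading suitable for measuring elapsed intervals, not wall-clock time. A rough standalone sketch of such a cached relative clock (an assumption about the general technique, not ThreadPool's exact implementation):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CachedRelativeClock {
        // Refreshed periodically by a background task; reads are a cheap
        // volatile load instead of a System.nanoTime() call per caller.
        private volatile long relativeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());

        CachedRelativeClock(ScheduledExecutorService scheduler, long resolutionMillis) {
            scheduler.scheduleAtFixedRate(
                () -> relativeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()),
                resolutionMillis, resolutionMillis, TimeUnit.MILLISECONDS);
        }

        /** Monotonic, coarse-grained time for computing intervals. */
        long relativeTimeInMillis() {
            return relativeMillis;
        }

        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            CachedRelativeClock clock = new CachedRelativeClock(scheduler, 200);
            long start = clock.relativeTimeInMillis();
            Thread.sleep(500);
            System.out.println("elapsed ~" + (clock.relativeTimeInMillis() - start) + "ms");
            scheduler.shutdownNow();
        }
    }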
@@ -147,7 +147,7 @@ public class InternalEngine extends Engine {
         EngineMergeScheduler scheduler = null;
         boolean success = false;
         try {
-            this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
+            this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis();

             mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
             throttle = new IndexThrottle();
@@ -446,7 +446,7 @@ public class InternalEngine extends Engine {

     private long checkDeletedAndGCed(VersionValue versionValue) {
         long currentVersion;
-        if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
+        if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
             currentVersion = Versions.NOT_FOUND; // deleted, and GC
         } else {
             currentVersion = versionValue.version();
@@ -478,6 +478,20 @@ public class InternalEngine extends Engine {
         return false;
     }

+    private boolean assertVersionType(final Engine.Operation operation) {
+        if (operation.origin() == Operation.Origin.REPLICA ||
+            operation.origin() == Operation.Origin.PEER_RECOVERY ||
+            operation.origin() == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
+            // ensure that replica operation has expected version type for replication
+            // ensure that versionTypeForReplicationAndRecovery is idempotent
+            assert operation.versionType() == operation.versionType().versionTypeForReplicationAndRecovery()
+                : "unexpected version type in request from [" + operation.origin().name() + "] " +
+                "found [" + operation.versionType().name() + "] " +
+                "expected [" + operation.versionType().versionTypeForReplicationAndRecovery().name() + "]";
+        }
+        return true;
+    }
+
     private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
         if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
             // legacy support
@@ -499,6 +513,7 @@ public class InternalEngine extends Engine {
         try (ReleasableLock releasableLock = readLock.acquire()) {
             ensureOpen();
             assert assertSequenceNumber(index.origin(), index.seqNo());
+            assert assertVersionType(index);
             final Translog.Location location;
             long seqNo = index.seqNo();
             try (Releasable ignored = acquireLock(index.uid());
@@ -692,6 +707,7 @@ public class InternalEngine extends Engine {
     public DeleteResult delete(Delete delete) throws IOException {
         DeleteResult result;
         try (ReleasableLock ignored = readLock.acquire()) {
+            assert assertVersionType(delete);
             ensureOpen();
             // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
             result = innerDelete(delete);
@@ -710,7 +726,7 @@ public class InternalEngine extends Engine {
     private void maybePruneDeletedTombstones() {
         // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it
         // every 1/4 of gcDeletesInMillis:
-        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
+        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
             pruneDeletedTombstones();
         }
     }
@@ -756,7 +772,7 @@ public class InternalEngine extends Engine {
                 deleteResult = new DeleteResult(updatedVersion, seqNo, found);

                 versionMap.putUnderLock(delete.uid().bytes(),
-                    new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
+                    new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().relativeTimeInMillis()));
             }
             if (!deleteResult.hasFailure()) {
                 location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
@@ -1031,7 +1047,7 @@ public class InternalEngine extends Engine {
     }

     private void pruneDeletedTombstones() {
-        long timeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
+        long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();

         // TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...
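The new assertVersionType follows the assert-method idiom already used by assertSequenceNumber: the check lives in a method that always returns true, and the call site wraps it in assert, so the entire check (including its string building) compiles away when assertions are disabled. A minimal standalone illustration (hypothetical names):

    public class AssertMethodIdiomExample {
        // The validation runs only when the JVM is started with -ea;
        // returning true lets the method sit inside an assert statement.
        private static boolean assertPositive(long value) {
            assert value > 0 : "expected a positive value but found [" + value + "]";
            return true;
        }

        static void record(long seqNo) {
            assert assertPositive(seqNo); // not even evaluated without -ea
            // ... normal processing ...
        }

        public static void main(String[] args) {
            record(42);   // fine
            record(-1);   // AssertionError when run with java -ea
        }
    }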
@@ -162,7 +162,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     }

     public static class GeoPointFieldType extends MappedFieldType {
-        GeoPointFieldType() {
+        public GeoPointFieldType() {
         }

         GeoPointFieldType(GeoPointFieldType ref) {
@@ -23,7 +23,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MultiTermQuery;
@@ -114,12 +114,12 @@ public class IdFieldMapper extends MetadataFieldMapper {
     @Override
     public Query termQuery(Object value, @Nullable QueryShardContext context) {
         final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
-        return new TermsQuery(UidFieldMapper.NAME, uids);
+        return new TermInSetQuery(UidFieldMapper.NAME, uids);
     }

     @Override
     public Query termsQuery(List values, @Nullable QueryShardContext context) {
-        return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
+        return new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
     }
 }
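This and the remaining mapper hunks below are the same mechanical migration: Lucene's org.apache.lucene.queries.TermsQuery moved into core as org.apache.lucene.search.TermInSetQuery, keeping a compatible field-plus-terms constructor. A small self-contained sketch of building such a query (assumes a Lucene 6.4+ dependency; the field name and values are illustrative):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermInSetQuery;
    import org.apache.lucene.util.BytesRef;

    public class TermInSetQueryExample {
        public static void main(String[] args) {
            // Matches documents whose "_uid" field contains any of the given
            // terms -- the same shape the mapper code above produces.
            List<BytesRef> uids = Arrays.asList(new BytesRef("type#1"), new BytesRef("type#2"));
            Query q = new TermInSetQuery("_uid", uids.toArray(new BytesRef[0]));
            System.out.println(q); // a _uid:("type#1" "type#2")-style representation
        }
    }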
@@ -124,7 +124,7 @@ public class IpFieldMapper extends FieldMapper {

     public static final class IpFieldType extends MappedFieldType {

-        IpFieldType() {
+        public IpFieldType() {
             super();
             setTokenized(false);
             setHasDocValues(true);
@@ -22,7 +22,7 @@ package org.elasticsearch.index.mapper;
 import java.util.List;

 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -53,7 +53,7 @@ public abstract class StringFieldType extends TermBasedFieldType {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }

     @Override
@@ -22,9 +22,9 @@ package org.elasticsearch.index.mapper;
 import java.util.List;

 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -66,7 +66,7 @@ abstract class TermBasedFieldType extends MappedFieldType {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }

 }
@@ -26,13 +26,13 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lucene.Lucene;
@@ -172,7 +172,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
      * Specialization for a disjunction over many _type
      */
     public static class TypesQuery extends Query {
-        // Same threshold as TermsQuery
+        // Same threshold as TermInSetQuery
        private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;

        private final BytesRef[] types;

@@ -220,7 +220,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
             }
             return new ConstantScoreQuery(bq.build());
         }
-        return new TermsQuery(CONTENT_TYPE, types);
+        return new TermInSetQuery(CONTENT_TYPE, types);
     }

     @Override
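TypesQuery keeps TermInSetQuery's 16-term threshold: below it, a disjunction of plain TermQuery clauses is typically the cheaper, more cache-friendly form; above it, the set-based query scales better. A sketch of that rewrite decision (standalone and illustrative, not the TypesQuery implementation, which also consults term statistics):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermInSetQuery;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.util.BytesRef;

    public class TypeDisjunctionExample {
        private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;

        static Query typesQuery(String field, BytesRef... types) {
            if (types.length <= BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD) {
                // Few terms: an explicit OR of TermQuery clauses.
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                for (BytesRef type : types) {
                    bq.add(new TermQuery(new Term(field, type)), BooleanClause.Occur.SHOULD);
                }
                return new ConstantScoreQuery(bq.build());
            }
            // Many terms: the dedicated set query scales better.
            return new TermInSetQuery(field, types);
        }
    }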