Merge branch 'master' into feature/rank-eval

Christoph Büscher 2017-02-27 11:25:17 +01:00
commit 1f4c4d99b9
432 changed files with 9910 additions and 7197 deletions

Vagrantfile

@@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
# debian and it works fine.
config.vm.define "debian-8" do |config|
config.vm.box = "elastic/debian-8-x86_64"
deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
deb_common config
end
config.vm.define "centos-6" do |config|
config.vm.box = "elastic/centos-6-x86_64"
@@ -114,10 +114,10 @@ SOURCE_PROMPT
end
def ubuntu_common(config, extra: '')
deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*', extra: extra
deb_common config, extra: extra
end
def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '')
def deb_common(config, extra: '')
# http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
config.vm.provision "fix-no-tty", type: "shell" do |s|
s.privileged = false
@@ -127,24 +127,14 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list, extra: '')
update_command: "apt-get update",
update_tracking_file: "/var/cache/apt/archives/last_update",
install_command: "apt-get install -y",
java_package: "openjdk-8-jdk",
extra: <<-SHELL
export DEBIAN_FRONTEND=noninteractive
ls /etc/apt/sources.list.d/#{openjdk_list}.list > /dev/null 2>&1 ||
(echo "==> Importing java-8 ppa" &&
#{add_openjdk_repository_command} &&
apt-get update)
#{extra}
SHELL
)
extra: extra)
end
def rpm_common(config)
provision(config,
update_command: "yum check-update",
update_tracking_file: "/var/cache/yum/last_update",
install_command: "yum install -y",
java_package: "java-1.8.0-openjdk-devel")
install_command: "yum install -y")
end
def dnf_common(config)
@@ -152,8 +142,7 @@ def dnf_common(config)
update_command: "dnf check-update",
update_tracking_file: "/var/cache/dnf/last_update",
install_command: "dnf install -y",
install_command_retries: 5,
java_package: "java-1.8.0-openjdk-devel")
install_command_retries: 5)
if Vagrant.has_plugin?("vagrant-cachier")
# Autodetect doesn't work....
config.cache.auto_detect = false
@@ -170,7 +159,6 @@ def suse_common(config, extra)
update_command: "zypper --non-interactive list-updates",
update_tracking_file: "/var/cache/zypp/packages/last_update",
install_command: "zypper --non-interactive --quiet install --no-recommends",
java_package: "java-1_8_0-openjdk-devel",
extra: extra)
end
@@ -193,7 +181,6 @@ end
# is cached by vagrant-cachier.
# @param install_command [String] The command used to install a package.
# Required. Think `apt-get install #{package}`.
# @param java_package [String] The name of the java package. Required.
# @param extra [String] Extra provisioning commands run before anything else.
# Optional. Used for things like setting up the ppa for Java 8.
def provision(config,
@@ -201,13 +188,11 @@ def provision(config,
update_tracking_file: 'required',
install_command: 'required',
install_command_retries: 0,
java_package: 'required',
extra: '')
# Vagrant runs ruby 2.0.0 which doesn't have required named parameters....
raise ArgumentError.new('update_command is required') if update_command == 'required'
raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
raise ArgumentError.new('install_command is required') if install_command == 'required'
raise ArgumentError.new('java_package is required') if java_package == 'required'
config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
set -e
set -o pipefail
@@ -254,7 +239,10 @@ def provision(config,
#{extra}
installed java || install #{java_package}
installed java || {
echo "==> Java is not installed on vagrant box ${config.vm.box}"
return 1
}
ensure tar
ensure curl
ensure unzip

benchmarks/build.gradle

@@ -37,10 +37,7 @@ apply plugin: 'application'
archivesBaseName = 'elasticsearch-benchmarks'
mainClassName = 'org.openjdk.jmh.Main'
// never try to invoke tests on the benchmark project - there aren't any
check.dependsOn.remove(test)
// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
task test(type: Test, overwrite: true)
test.enabled = false
dependencies {
compile("org.elasticsearch:elasticsearch:${version}") {
@@ -59,7 +56,6 @@ compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-u
// enable the JMH's BenchmarkProcessor to generate the final benchmark classes
// needs to be added separately otherwise Gradle will quote it and javac will fail
compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
forbiddenApis {
// classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes

PluginBuildPlugin.groovy

@@ -120,12 +120,15 @@ public class PluginBuildPlugin extends BuildPlugin {
// add the plugin properties and metadata to test resources, so unit tests can
// know about the plugin (used by test security code to statically initialize the plugin in unit tests)
SourceSet testSourceSet = project.sourceSets.test
testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
testSourceSet.output.dir(buildProperties.descriptorOutput.parentFile, builtBy: 'pluginProperties')
testSourceSet.resources.srcDir(pluginMetadata)
// create the actual bundle task, which zips up all the files for the plugin
Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) {
from buildProperties // plugin properties file
from(buildProperties.descriptorOutput.parentFile) {
// plugin properties file
include(buildProperties.descriptorOutput.name)
}
from pluginMetadata // metadata (eg custom security policy)
from project.jar // this plugin's jar
from project.configurations.runtime - project.configurations.provided // the dep jars
@@ -250,19 +253,15 @@ public class PluginBuildPlugin extends BuildPlugin {
protected void addNoticeGeneration(Project project) {
File licenseFile = project.pluginProperties.extension.licenseFile
if (licenseFile != null) {
project.bundlePlugin.into('/') {
from(licenseFile.parentFile) {
include(licenseFile.name)
}
project.bundlePlugin.from(licenseFile.parentFile) {
include(licenseFile.name)
}
}
File noticeFile = project.pluginProperties.extension.licenseFile
if (noticeFile != null) {
NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class)
generateNotice.dependencies(project)
project.bundlePlugin.into('/') {
from(generateNotice)
}
project.bundlePlugin.from(generateNotice)
}
}
}

PluginPropertiesTask.groovy

@@ -22,6 +22,7 @@ import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Task
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.OutputFile
/**
* Creates a plugin descriptor.
@@ -29,20 +30,22 @@ import org.gradle.api.tasks.Copy
class PluginPropertiesTask extends Copy {
PluginPropertiesExtension extension
File generatedResourcesDir = new File(project.buildDir, 'generated-resources')
@OutputFile
File descriptorOutput = new File(project.buildDir, 'generated-resources/plugin-descriptor.properties')
PluginPropertiesTask() {
File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')
File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}")
Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') {
doLast {
InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream('/plugin-descriptor.properties')
InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}")
templateFile.parentFile.mkdirs()
templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8')
}
}
dependsOn(copyPluginPropertiesTemplate)
extension = project.extensions.create('esplugin', PluginPropertiesExtension, project)
project.clean.delete(generatedResourcesDir)
project.afterEvaluate {
// check required properties are set
if (extension.name == null) {
@@ -55,8 +58,8 @@ class PluginPropertiesTask extends Copy {
throw new InvalidUserDataException('classname is a required setting for esplugin')
}
// configure property substitution
from(templateFile)
into(generatedResourcesDir)
from(templateFile.parentFile).include(descriptorOutput.name)
into(descriptorOutput.parentFile)
Map<String, String> properties = generateSubstitutions()
expand(properties)
inputs.properties(properties)

PrecommitTasks.groovy

@@ -91,6 +91,7 @@ class PrecommitTasks {
if (testForbidden != null) {
testForbidden.configure {
signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt')
}
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')

ClusterFormationTasks.groovy

@@ -51,22 +51,18 @@ class ClusterFormationTasks {
*
* Returns a list of NodeInfo objects for each node in the cluster.
*/
static List<NodeInfo> setup(Project project, Task task, ClusterConfiguration config) {
if (task.getEnabled() == false) {
// no need to add cluster formation tasks if the task won't run!
return
}
static List<NodeInfo> setup(Project project, String prefix, Task runner, ClusterConfiguration config) {
File sharedDir = new File(project.buildDir, "cluster/shared")
// first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
// in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
// such that snapshots survive failures / test runs and there is no simple way today to fix that.
Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) {
delete sharedDir
doLast {
sharedDir.mkdirs()
}
}
List<Task> startTasks = [cleanup]
List<Task> startTasks = []
List<NodeInfo> nodes = []
if (config.numNodes < config.numBwcNodes) {
throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
@@ -75,7 +71,7 @@
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
String distroConfigName = "${task.name}_elasticsearchDistro"
String distroConfigName = "${prefix}_elasticsearchDistro"
Configuration currentDistro = project.configurations.create(distroConfigName)
configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
if (config.bwcVersion != null && config.numBwcNodes > 0) {
@@ -89,7 +85,7 @@
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(),
configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(),
project.configurations.elasticsearchBwcPlugins, config.bwcVersion)
}
project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
@@ -104,13 +100,14 @@
elasticsearchVersion = config.bwcVersion
distro = project.configurations.elasticsearchBwcDistro
}
NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
nodes.add(node)
startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0)))
Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0)
startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0)))
}
Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
task.dependsOn(wait)
Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks)
runner.dependsOn(wait)
return nodes
}
@@ -150,58 +147,58 @@
*
* @return a task which starts the node.
*/
static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
// tasks are chained so their execution order is maintained
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
delete node.homeDir
delete node.cwd
doLast {
node.cwd.mkdirs()
}
}
setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
if (node.config.plugins.isEmpty() == false) {
if (node.nodeVersion == VersionProperties.elasticsearch) {
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node)
} else {
setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node)
setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node)
}
}
// install modules
for (Project module : node.config.modules) {
String actionName = pluginTaskName('install', module.name, 'Module')
setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module)
}
// install plugins
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue())
}
// sets up any extra config files that need to be copied over to the ES instance;
// it's run after plugins have been installed, as the extra config files may belong to plugins
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node)
// extra setup commands
for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
// the first argument is the actual script name, relative to home
Object[] args = command.getValue().clone()
args[0] = new File(node.homeDir, args[0].toString())
setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args)
setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args)
}
Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node)
if (node.config.daemonize) {
Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node)
// if we are running in the background, make sure to stop the server when the task completes
task.finalizedBy(stop)
runner.finalizedBy(stop)
start.finalizedBy(stop)
}
return start
@@ -648,11 +645,11 @@
}
/** Returns a unique task name for this task and node configuration */
static String taskName(Task parentTask, NodeInfo node, String action) {
static String taskName(String prefix, NodeInfo node, String action) {
if (node.config.numNodes > 1) {
return "${parentTask.name}#node${node.nodeNum}.${action}"
return "${prefix}#node${node.nodeNum}.${action}"
} else {
return "${parentTask.name}#${action}"
return "${prefix}#${action}"
}
}
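As a worked example of this naming scheme: a single-node cluster set up with prefix `integTest` gets tasks like `integTest#start` and `integTest#stop`, while a two-node cluster gets `integTest#node0.start`, `integTest#node1.start`, and so on.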

NodeInfo.groovy

@@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test
import org.apache.tools.ant.taskdefs.condition.Os
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
/**
* A container for the files and configuration associated with a single node in a test cluster.
@@ -96,17 +95,17 @@ class NodeInfo {
/** the version of elasticsearch that this node runs */
String nodeVersion
/** Creates a node to run as part of a cluster for the given task */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
/** Holds node configuration for part of a test cluster. */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
this.config = config
this.nodeNum = nodeNum
this.sharedDir = sharedDir
if (config.clusterName != null) {
clusterName = config.clusterName
} else {
clusterName = "${task.path.replace(':', '_').substring(1)}"
clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
}
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
this.nodeVersion = nodeVersion
homeDir = homeDir(baseDir, config.distribution, nodeVersion)

RestIntegTestTask.groovy

@@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.DefaultTask
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin
@@ -27,12 +28,15 @@ import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil
/**
* Runs integration tests, but first starts an ES cluster,
* and passes the ES cluster info as parameters to the tests.
* A wrapper task around setting up a cluster and running rest tests.
*/
public class RestIntegTestTask extends RandomizedTestingTask {
public class RestIntegTestTask extends DefaultTask {
ClusterConfiguration clusterConfig
protected ClusterConfiguration clusterConfig
protected RandomizedTestingTask runner
protected Task clusterInit
/** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
List<NodeInfo> nodes
@@ -44,35 +48,44 @@ public class RestIntegTestTask extends RandomizedTestingTask {
public RestIntegTestTask() {
description = 'Runs rest tests against an elasticsearch cluster.'
group = JavaBasePlugin.VERIFICATION_GROUP
dependsOn(project.testClasses)
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = new ClusterConfiguration(project)
runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class)
super.dependsOn(runner)
clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
runner.dependsOn(clusterInit)
runner.classpath = project.sourceSets.test.runtimeClasspath
runner.testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project)
// start with the common test configuration
configure(BuildPlugin.commonTestConfig(project))
runner.configure(BuildPlugin.commonTestConfig(project))
// override/add more for rest tests
parallelism = '1'
include('**/*IT.class')
systemProperty('tests.rest.load_packaged', 'false')
runner.parallelism = '1'
runner.include('**/*IT.class')
runner.systemProperty('tests.rest.load_packaged', 'false')
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
// copy the rest spec/tests into the test resources
RestSpecHack.configureDependencies(project)
project.afterEvaluate {
dependsOn(RestSpecHack.configureTask(project, includePackaged))
runner.dependsOn(RestSpecHack.configureTask(project, includePackaged))
}
// this must run after all projects have been configured, so we know any project
// references can be accessed as fully configured projects
project.gradle.projectsEvaluated {
nodes = ClusterFormationTasks.setup(project, this, clusterConfig)
if (enabled == false) {
runner.enabled = false
clusterInit.enabled = false
return // no need to add cluster formation tasks if the task won't run!
}
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
super.dependsOn(runner.finalizedBy)
}
}
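Concretely, a task created as `integTest` now owns an `integTestRunner` RandomizedTestingTask that actually executes the tests, an `integTestCluster#init` bootstrap task, and an `integTestCluster` project extension through which the cluster is configured.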
@@ -84,25 +97,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
clusterConfig.debug = enabled;
}
@Input
public void cluster(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig)
}
public ClusterConfiguration getCluster() {
return clusterConfig
}
public List<NodeInfo> getNodes() {
return nodes
}
@Override
public Task dependsOn(Object... dependencies) {
super.dependsOn(dependencies)
runner.dependsOn(dependencies)
for (Object dependency : dependencies) {
if (dependency instanceof Fixture) {
finalizedBy(((Fixture)dependency).stopTask)
runner.finalizedBy(((Fixture)dependency).stopTask)
}
}
return this
@@ -110,11 +114,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
@Override
public void setDependsOn(Iterable<?> dependencies) {
super.setDependsOn(dependencies)
runner.setDependsOn(dependencies)
for (Object dependency : dependencies) {
if (dependency instanceof Fixture) {
finalizedBy(((Fixture)dependency).stopTask)
runner.finalizedBy(((Fixture)dependency).stopTask)
}
}
}
@Override
public Task mustRunAfter(Object... tasks) {
clusterInit.mustRunAfter(tasks)
}
}

RestTestPlugin.groovy

@@ -43,7 +43,7 @@ public class RestTestPlugin implements Plugin<Project> {
}
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
integTest.clusterConfig.distribution = 'zip' // rest tests should run with the real zip
integTest.mustRunAfter(project.precommit)
project.check.dependsOn(integTest)
}

RunTask.groovy

@@ -18,7 +18,7 @@ public class RunTask extends DefaultTask {
clusterConfig.daemonize = false
clusterConfig.distribution = 'zip'
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
ClusterFormationTasks.setup(project, name, this, clusterConfig)
}
}

VagrantTestPlugin.groovy

@@ -1,14 +1,14 @@
package org.elasticsearch.gradle.vagrant
import org.elasticsearch.gradle.FileContentsTask
import org.gradle.BuildAdapter
import org.gradle.BuildResult
import org.gradle.api.*
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.execution.TaskExecutionAdapter
import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.TaskState
class VagrantTestPlugin implements Plugin<Project> {
@@ -16,10 +16,8 @@ class VagrantTestPlugin implements Plugin<Project> {
static List<String> BOXES = [
'centos-6',
'centos-7',
// TODO: re-enable debian once it does not have broken openjdk packages
//'debian-8',
// TODO: re-enable fedora once it does not have broken openjdk packages
//'fedora-24',
'debian-8',
'fedora-24',
'oel-6',
'oel-7',
'opensuse-13',
@@ -125,33 +123,27 @@ class VagrantTestPlugin implements Plugin<Project> {
private static void createBatsConfiguration(Project project) {
project.configurations.create(BATS)
Long seed
String formattedSeed = null
String[] upgradeFromVersions
String maybeTestsSeed = System.getProperty("tests.seed", null);
final long seed
final String formattedSeed
String maybeTestsSeed = System.getProperty("tests.seed")
if (maybeTestsSeed != null) {
List<String> seeds = maybeTestsSeed.tokenize(':')
if (seeds.size() != 0) {
String masterSeed = seeds.get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
if (maybeTestsSeed.trim().isEmpty()) {
throw new GradleException("explicit tests.seed cannot be empty")
}
}
if (formattedSeed == null) {
String masterSeed = maybeTestsSeed.tokenize(':').get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
} else {
seed = new Random().nextLong()
formattedSeed = String.format("%016X", seed)
}
String maybeUpdradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpdradeFromVersions != null) {
upgradeFromVersions = maybeUpdradeFromVersions.split(",")
} else {
upgradeFromVersions = getVersionsFile(project)
String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion");
if (upgradeFromVersion == null) {
List<String> availableVersions = getVersionsFile(project).readLines('UTF-8')
upgradeFromVersion = availableVersions[new Random(seed).nextInt(availableVersions.size())]
}
String upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)]
DISTRIBUTION_ARCHIVES.each {
// Adds a dependency for the current version
project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives'))
@@ -165,7 +157,6 @@ class VagrantTestPlugin implements Plugin<Project> {
project.extensions.esvagrant.testSeed = seed
project.extensions.esvagrant.formattedTestSeed = formattedSeed
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
project.extensions.esvagrant.upgradeFromVersions = upgradeFromVersions
}
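Note that the upgrade version is drawn from `new Random(seed)`, so re-running with the same `-Dtests.seed` deterministically picks the same `upgradeFromVersion`; the REPRODUCE line added further down in this file relies on exactly that.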
private static void createCleanTask(Project project) {
@@ -256,22 +247,9 @@ class VagrantTestPlugin implements Plugin<Project> {
contents project.extensions.esvagrant.upgradeFromVersion
}
Task vagrantSetUpTask = project.tasks.create('vagrantSetUp')
Task vagrantSetUpTask = project.tasks.create('setupBats')
vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
vagrantSetUpTask.doFirst {
project.gradle.addBuildListener new BuildAdapter() {
@Override
void buildFinished(BuildResult result) {
if (result.failure) {
println "Reproduce with: gradle packagingTest "
+"-Pvagrant.boxes=${project.extensions.esvagrant.boxes} "
+ "-Dtests.seed=${project.extensions.esvagrant.formattedSeed} "
+ "-Dtests.packaging.upgrade.from.versions=${project.extensions.esvagrant.upgradeFromVersions.join(",")}"
}
}
}
}
}
private static void createUpdateVersionsTask(Project project) {
@@ -280,7 +258,7 @@ class VagrantTestPlugin implements Plugin<Project> {
group 'Verification'
doLast {
File versions = getVersionsFile(project)
versions.text = listVersions(project).join('\n') + '\n'
versions.setText(listVersions(project).join('\n') + '\n', 'UTF-8')
}
}
}
@@ -290,14 +268,11 @@ class VagrantTestPlugin implements Plugin<Project> {
description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
group 'Verification'
doLast {
String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpdateFromVersions == null) {
Set<String> versions = listVersions(project)
Set<String> actualVersions = new TreeSet<>(project.extensions.esvagrant.upgradeFromVersions)
if (!versions.equals(actualVersions)) {
throw new GradleException("out-of-date versions " + actualVersions +
", expected " + versions + "; run gradle vagrantUpdateVersions")
}
Set<String> versions = listVersions(project)
Set<String> actualVersions = new TreeSet<>(getVersionsFile(project).readLines('UTF-8'))
if (!versions.equals(actualVersions)) {
throw new GradleException("out-of-date versions " + actualVersions +
", expected " + versions + "; run gradle vagrantUpdateVersions")
}
}
}
@@ -379,8 +354,8 @@ class VagrantTestPlugin implements Plugin<Project> {
assert project.tasks.virtualboxCheckVersion != null
Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
assert project.tasks.vagrantSetUp != null
Task vagrantSetUp = project.tasks.vagrantSetUp
assert project.tasks.setupBats != null
Task setupBats = project.tasks.setupBats
assert project.tasks.packagingTest != null
Task packagingTest = project.tasks.packagingTest
@@ -411,8 +386,9 @@ class VagrantTestPlugin implements Plugin<Project> {
boxName box
environmentVars vagrantEnvVars
args 'box', 'update', box
dependsOn vagrantCheckVersion, virtualboxCheckVersion, vagrantSetUp
dependsOn vagrantCheckVersion, virtualboxCheckVersion
}
update.mustRunAfter(setupBats)
Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
boxName box
@@ -433,11 +409,6 @@ class VagrantTestPlugin implements Plugin<Project> {
dependsOn update
}
if (project.extensions.esvagrant.boxes.contains(box) == false) {
// we don't need test tasks if this box was not specified
continue;
}
Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) {
environment vagrantEnvVars
dependsOn up
@@ -447,14 +418,32 @@ class VagrantTestPlugin implements Plugin<Project> {
}
vagrantSmokeTest.dependsOn(smoke)
Task packaging = project.tasks.create("vagrant${boxTask}#packagingtest", BatsOverVagrantTask) {
Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) {
boxName box
environmentVars vagrantEnvVars
dependsOn up
dependsOn up, setupBats
finalizedBy halt
command BATS_TEST_COMMAND
}
packagingTest.dependsOn(packaging)
TaskExecutionAdapter reproduceListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${packaging.path} " +
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
}
}
}
packaging.doFirst {
project.gradle.addListener(reproduceListener)
}
packaging.doLast {
project.gradle.removeListener(reproduceListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
packagingTest.dependsOn(packaging)
}
}
}
}

checkstyle_suppressions.xml

@@ -157,7 +157,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineTransportAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
@@ -452,8 +451,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />

http-signatures.txt

@@ -0,0 +1,45 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
@defaultMessage Explicitly specify the ContentType of HTTP entities when creating
org.apache.http.entity.StringEntity#<init>(java.lang.String)
org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String)
org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset)
org.apache.http.entity.ByteArrayEntity#<init>(byte[])
org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int)
org.apache.http.entity.FileEntity#<init>(java.io.File)
org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream)
org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long)
org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[])
org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int)
org.apache.http.nio.entity.NFileEntity#<init>(java.io.File)
org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String)
org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String)
@defaultMessage Use non-deprecated constructors
org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String)
org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean)
org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String)
org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String)
@defaultMessage BasicEntity is easy to mess up and forget to set content type
org.apache.http.entity.BasicHttpEntity#<init>()
@defaultMessage EntityTemplate is easy to mess up and forget to set content type
org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer)
@defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type
org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable)

TransportNoopSearchAction.java

@@ -53,6 +53,6 @@ public class TransportNoopSearchAction extends HandledTransportAction<SearchRequ
new SearchHit[0], 0L, 0.0f),
new InternalAggregations(Collections.emptyList()),
new Suggest(Collections.emptyList()),
new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
new SearchProfileShardResults(Collections.emptyMap()), false, false, 1), "", 1, 1, 0, new ShardSearchFailure[0]));
}
}

core/build.gradle

@@ -1,3 +1,5 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -39,3 +41,9 @@ dependencyLicenses {
it.group.startsWith('org.elasticsearch') == false
}
}
forbiddenApisMain {
// core does not depend on the httpclient at compile time, so we add the signatures here. We don't add them for tests as they are
// already specified
signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}

Request.java

@@ -20,6 +20,7 @@
package org.elasticsearch.client;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpPost;
@@ -28,16 +29,28 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
@@ -69,8 +82,140 @@ final class Request {
'}';
}
static Request ping() {
return new Request("HEAD", "/", Collections.emptyMap(), null);
static Request delete(DeleteRequest deleteRequest) {
String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
Params parameters = Params.builder();
parameters.withRouting(deleteRequest.routing());
parameters.withParent(deleteRequest.parent());
parameters.withTimeout(deleteRequest.timeout());
parameters.withVersion(deleteRequest.version());
parameters.withVersionType(deleteRequest.versionType());
parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy());
parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards());
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
}
static Request bulk(BulkRequest bulkRequest) throws IOException {
Params parameters = Params.builder();
parameters.withTimeout(bulkRequest.timeout());
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
// Bulk API only supports newline delimited JSON or Smile. Before executing
// the bulk, we need to check that all requests have the same content-type
// and this content-type is supported by the Bulk API.
XContentType bulkContentType = null;
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
DocWriteRequest<?> request = bulkRequest.requests().get(i);
DocWriteRequest.OpType opType = request.opType();
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType);
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) request;
if (updateRequest.doc() != null) {
bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
}
if (updateRequest.upsertRequest() != null) {
bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType);
}
}
}
if (bulkContentType == null) {
bulkContentType = XContentType.JSON;
}
byte separator = bulkContentType.xContent().streamSeparator();
ContentType requestContentType = ContentType.create(bulkContentType.mediaType());
ByteArrayOutputStream content = new ByteArrayOutputStream();
for (DocWriteRequest<?> request : bulkRequest.requests()) {
DocWriteRequest.OpType opType = request.opType();
try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
metadata.startObject();
{
metadata.startObject(opType.getLowercase());
if (Strings.hasLength(request.index())) {
metadata.field("_index", request.index());
}
if (Strings.hasLength(request.type())) {
metadata.field("_type", request.type());
}
if (Strings.hasLength(request.id())) {
metadata.field("_id", request.id());
}
if (Strings.hasLength(request.routing())) {
metadata.field("_routing", request.routing());
}
if (Strings.hasLength(request.parent())) {
metadata.field("_parent", request.parent());
}
if (request.version() != Versions.MATCH_ANY) {
metadata.field("_version", request.version());
}
VersionType versionType = request.versionType();
if (versionType != VersionType.INTERNAL) {
if (versionType == VersionType.EXTERNAL) {
metadata.field("_version_type", "external");
} else if (versionType == VersionType.EXTERNAL_GTE) {
metadata.field("_version_type", "external_gte");
} else if (versionType == VersionType.FORCE) {
metadata.field("_version_type", "force");
}
}
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
IndexRequest indexRequest = (IndexRequest) request;
if (Strings.hasLength(indexRequest.getPipeline())) {
metadata.field("pipeline", indexRequest.getPipeline());
}
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) request;
if (updateRequest.retryOnConflict() > 0) {
metadata.field("_retry_on_conflict", updateRequest.retryOnConflict());
}
if (updateRequest.fetchSource() != null) {
metadata.field("_source", updateRequest.fetchSource());
}
}
metadata.endObject();
}
metadata.endObject();
BytesRef metadataSource = metadata.bytes().toBytesRef();
content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length);
content.write(separator);
}
BytesRef source = null;
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
IndexRequest indexRequest = (IndexRequest) request;
BytesReference indexSource = indexRequest.source();
XContentType indexXContentType = indexRequest.getContentType();
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, indexSource, indexXContentType)) {
try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) {
builder.copyCurrentStructure(parser);
source = builder.bytes().toBytesRef();
}
}
} else if (opType == DocWriteRequest.OpType.UPDATE) {
source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef();
}
if (source != null) {
content.write(source.bytes, source.offset, source.length);
content.write(separator);
}
}
HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType);
return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity);
}
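As a rough usage sketch of this conversion (the index name, type, and id are made up, and it assumes the XContentType-aware IndexRequest.source overload):

    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(new IndexRequest("posts", "doc", "1")
            .source("{\"user\":\"kimchy\"}", XContentType.JSON));
    Request request = Request.bulk(bulkRequest);
    // request.method is "POST" and request.endpoint is "/_bulk"; the entity body holds
    // one metadata line and one source line, each terminated by the stream separator:
    //   {"index":{"_index":"posts","_type":"doc","_id":"1"}}
    //   {"user":"kimchy"}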
static Request exists(GetRequest getRequest) {
@@ -118,6 +263,52 @@ final class Request {
return new Request(method, endpoint, parameters.getParams(), entity);
}
static Request ping() {
return new Request("HEAD", "/", Collections.emptyMap(), null);
}
static Request update(UpdateRequest updateRequest) throws IOException {
String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
Params parameters = Params.builder();
parameters.withRouting(updateRequest.routing());
parameters.withParent(updateRequest.parent());
parameters.withTimeout(updateRequest.timeout());
parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
parameters.withWaitForActiveShards(updateRequest.waitForActiveShards());
parameters.withDocAsUpsert(updateRequest.docAsUpsert());
parameters.withFetchSourceContext(updateRequest.fetchSource());
parameters.withRetryOnConflict(updateRequest.retryOnConflict());
parameters.withVersion(updateRequest.version());
parameters.withVersionType(updateRequest.versionType());
// The Java API allows update requests with different content types
// set for the partial document and the upsert document. This client
// only accepts update requests that have the same content types set
// for both doc and upsert.
XContentType xContentType = null;
if (updateRequest.doc() != null) {
xContentType = updateRequest.doc().getContentType();
}
if (updateRequest.upsertRequest() != null) {
XContentType upsertContentType = updateRequest.upsertRequest().getContentType();
if ((xContentType != null) && (xContentType != upsertContentType)) {
throw new IllegalStateException("Update request cannot have different content types for doc [" + xContentType + "]" +
" and upsert [" + upsertContentType + "] documents");
} else {
xContentType = upsertContentType;
}
}
if (xContentType == null) {
xContentType = Requests.INDEX_CONTENT_TYPE;
}
BytesRef source = XContentHelper.toXContent(updateRequest, xContentType, false).toBytesRef();
HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType()));
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity);
}
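A minimal usage sketch (coordinates are illustrative; jsonBuilder is XContentFactory.jsonBuilder, and both it and Request.update may throw IOException):

    UpdateRequest updateRequest = new UpdateRequest("posts", "doc", "1")
            .doc(jsonBuilder().startObject().field("user", "kimchy").endObject())
            .docAsUpsert(true)
            .retryOnConflict(3);
    Request request = Request.update(updateRequest);
    // POST /posts/doc/1/_update with doc_as_upsert=true and retry_on_conflict=3 among
    // the parameters; the partial doc is serialized with its own content type (JSON here)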
/**
* Utility method to build request's endpoint.
*/
@@ -160,6 +351,13 @@ final class Request {
return this;
}
Params withDocAsUpsert(boolean docAsUpsert) {
if (docAsUpsert) {
return putParam("doc_as_upsert", Boolean.TRUE.toString());
}
return this;
}
Params withFetchSourceContext(FetchSourceContext fetchSourceContext) {
if (fetchSourceContext != null) {
if (fetchSourceContext.fetchSource() == false) {
@@ -203,7 +401,14 @@ final class Request {
Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
putParam("refresh", refreshPolicy.getValue());
return putParam("refresh", refreshPolicy.getValue());
}
return this;
}
Params withRetryOnConflict(int retryOnConflict) {
if (retryOnConflict > 0) {
return putParam("retry_on_conflict", String.valueOf(retryOnConflict));
}
return this;
}
@@ -252,4 +457,26 @@ final class Request {
return new Params();
}
}
/**
* Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms
* to the current {@link BulkRequest}'s content type (if it is known at the time this method is called).
*
* @return the {@link IndexRequest}'s content type
*/
static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) {
XContentType requestContentType = indexRequest.getContentType();
if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) {
throw new IllegalArgumentException("Unsupported content-type found for request with content-type [" + requestContentType
+ "], only JSON and SMILE are supported");
}
if (xContentType == null) {
return requestContentType;
}
if (requestContentType != xContentType) {
throw new IllegalArgumentException("Mismatching content-type found for request with content-type [" + requestContentType
+ "], previous requests have content-type [" + xContentType + "]");
}
return xContentType;
}
}

RestHighLevelClient.java

@@ -26,11 +26,17 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -39,24 +45,64 @@ import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Stream;
import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;
import static java.util.stream.Collectors.toList;
/**
* High level REST client that wraps an instance of the low level {@link RestClient} and allows building requests and reading responses.
* The provided {@link RestClient} is externally built and closed.
* Can be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins, or to
* add support for custom response sections, again added to Elasticsearch through plugins.
*/
public class RestHighLevelClient {
private final RestClient client;
private final NamedXContentRegistry registry;
public RestHighLevelClient(RestClient client) {
this.client = Objects.requireNonNull(client);
/**
* Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests.
*/
public RestHighLevelClient(RestClient restClient) {
this(restClient, Collections.emptyList());
}
/**
* Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and
* a list of entries that allow parsing custom response sections added to Elasticsearch through plugins.
*/
protected RestHighLevelClient(RestClient restClient, List<NamedXContentRegistry.Entry> namedXContentEntries) {
this.client = Objects.requireNonNull(restClient);
this.registry = new NamedXContentRegistry(Stream.of(
getNamedXContents().stream(),
namedXContentEntries.stream()
).flatMap(Function.identity()).collect(toList()));
}
/**
* Executes a bulk request using the Bulk API
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
*/
public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously executes a bulk request using the Bulk API
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
*/
public void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
}
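A hedged usage sketch (the host and the bulkRequest contents are illustrative):

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
    RestHighLevelClient client = new RestHighLevelClient(restClient);
    BulkResponse bulkResponse = client.bulk(bulkRequest);
    if (bulkResponse.hasFailures()) {
        // inspect the individual BulkItemResponse entries
    }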
/**
@@ -121,14 +167,55 @@ public class RestHighLevelClient {
performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers);
}
private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request, Function<Req, Request> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser, Set<Integer> ignores, Header... headers) throws IOException {
/**
* Updates a document using the Update API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
*/
public UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously updates a document using the Update API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
*/
public void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers);
}
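The asynchronous variants report failures through the listener rather than by throwing; a sketch:

    client.updateAsync(updateRequest, new ActionListener<UpdateResponse>() {
        @Override
        public void onResponse(UpdateResponse updateResponse) {
            // handle the parsed response
        }
        @Override
        public void onFailure(Exception e) {
            // validation, request conversion, transport and parsing failures all land here
        }
    });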
/**
* Deletes a document by id using the Delete api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
*/
public DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404),
headers);
}
/**
* Asynchronously deletes a document by id using the Delete api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
*/
public void deleteAsync(DeleteRequest deleteRequest, ActionListener<DeleteResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener,
Collections.singleton(404), headers);
}
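Because 404 is passed in the ignores set, deleting a document that does not exist is reported through the parsed response rather than as an exception; a sketch (coordinates illustrative):

    DeleteResponse deleteResponse = client.delete(new DeleteRequest("posts", "doc", "missing"));
    // deleteResponse.getResult() is DocWriteResponse.Result.NOT_FOUND instead of a
    // thrown ElasticsearchStatusException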
private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
Set<Integer> ignores, Header... headers) throws IOException {
return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
}
<Req extends ActionRequest, Resp> Resp performRequest(Req request, Function<Req, Request> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter, Set<Integer> ignores, Header... headers) throws IOException {
<Req extends ActionRequest, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores, Header... headers) throws IOException {
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
throw validationException;
@@ -154,27 +241,36 @@
}
}
private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, Function<Req, Request> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser, ActionListener<Resp> listener,
Set<Integer> ignores, Header... headers) {
private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser),
listener, ignores, headers);
}
<Req extends ActionRequest, Resp> void performRequestAsync(Req request, Function<Req, Request> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter, ActionListener<Resp> listener,
Set<Integer> ignores, Header... headers) {
<Req extends ActionRequest, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
listener.onFailure(validationException);
return;
}
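// Convert the request before dispatching it; if the conversion itself fails, the failure is handed to the listener rather than thrown to the caller.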
Request req;
try {
req = requestConverter.apply(request);
} catch (Exception e) {
listener.onFailure(e);
return;
}
ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
client.performRequestAsync(req.method, req.endpoint, req.params, req.entity, responseListener, headers);
}
<Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> actionListener, Set<Integer> ignores) {
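// Response codes listed in "ignores" (e.g. 404 for delete) are converted like successful responses instead of being reported as failures.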
return new ResponseListener() {
@Override
@ -219,7 +315,7 @@ public class RestHighLevelClient {
* that wraps the original {@link ResponseException}. Any exception raised while parsing the response body is added to the returned
* exception as a suppressed exception. This method is guaranteed never to throw an exception raised during that parsing.
*/
ElasticsearchStatusException parseResponseException(ResponseException responseException) {
Response response = responseException.getResponse();
HttpEntity entity = response.getEntity();
ElasticsearchStatusException elasticsearchException;
@ -239,7 +335,7 @@ public class RestHighLevelClient {
return elasticsearchException;
}
<Resp> Resp parseEntity(
HttpEntity entity, CheckedFunction<XContentParser, Resp, IOException> entityParser) throws IOException {
if (entity == null) {
throw new IllegalStateException("Response body expected but not returned");
@ -251,7 +347,7 @@ public class RestHighLevelClient {
if (xContentType == null) {
throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue());
}
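// Parsing uses the client's registry rather than an empty one, so named objects contributed by plugins can be resolved (see getNamedXContents below).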
try (XContentParser parser = xContentType.xContent().createParser(registry, entity.getContent())) {
return entityParser.apply(parser);
}
}
@ -259,4 +355,10 @@ public class RestHighLevelClient {
static boolean convertExistsResponse(Response response) {
return response.getStatusLine().getStatusCode() == 200;
}
static List<NamedXContentRegistry.Entry> getNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
//namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField("sterms"), StringTerms::fromXContent));
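// Entries added here, or supplied through the subclass constructor, let custom response sections from plugins be parsed (RestHighLevelClientExtTests demonstrates this).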
return namedXContents;
}
}


@ -25,26 +25,112 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import static org.hamcrest.CoreMatchers.containsString;
import static java.util.Collections.singletonMap;
public class CrudIT extends ESRestHighLevelClientTestCase {
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/23196")
public void testDelete() throws IOException {
{
// Testing a non-existing document
String docId = "does_not_exist";
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
assertEquals("index", deleteResponse.getIndex());
assertEquals("type", deleteResponse.getType());
assertEquals(docId, deleteResponse.getId());
assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
}
{
// Testing deletion
String docId = "id";
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")));
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
if (randomBoolean()) {
deleteRequest.version(1L);
}
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
assertEquals("index", deleteResponse.getIndex());
assertEquals("type", deleteResponse.getType());
assertEquals(docId, deleteResponse.getId());
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
}
{
// Testing version conflict
String docId = "version_conflict";
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")));
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).version(2);
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync));
assertEquals(RestStatus.CONFLICT, exception.status());
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + docId + "]: " +
"version conflict, current version [1] is different than the one provided [2]]", exception.getMessage());
assertEquals("index", exception.getMetadata("es.index").get(0));
}
{
// Testing version type
String docId = "version_type";
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))
.versionType(VersionType.EXTERNAL).version(12));
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(13);
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
assertEquals("index", deleteResponse.getIndex());
assertEquals("type", deleteResponse.getType());
assertEquals(docId, deleteResponse.getId());
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
}
{
// Testing version type with a wrong version
String docId = "wrong_version";
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))
.versionType(VersionType.EXTERNAL).version(12));
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(10);
execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
});
assertEquals(RestStatus.CONFLICT, exception.status());
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" +
docId + "]: version conflict, current version [12] is higher or equal to the one provided [10]]", exception.getMessage());
assertEquals("index", exception.getMetadata("es.index").get(0));
}
{
// Testing routing
String docId = "routing";
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo"));
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).routing("foo");
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
assertEquals("index", deleteResponse.getIndex());
assertEquals("type", deleteResponse.getType());
assertEquals(docId, deleteResponse.getId());
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
}
}
public void testExists() throws IOException {
{
GetRequest getRequest = new GetRequest("index", "type", "id");
@ -64,10 +150,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
{
GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1);
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
}
}
@ -266,4 +349,253 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
"version conflict, document already exists (current version [1])]", exception.getMessage());
}
}
public void testUpdate() throws IOException {
{
UpdateRequest updateRequest = new UpdateRequest("index", "type", "does_not_exist");
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync));
assertEquals(RestStatus.NOT_FOUND, exception.status());
assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]",
exception.getMessage());
}
{
IndexRequest indexRequest = new IndexRequest("index", "type", "id");
indexRequest.source(singletonMap("field", "value"));
IndexResponse indexResponse = highLevelClient().index(indexRequest);
assertEquals(RestStatus.CREATED, indexResponse.status());
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion());
UpdateRequest updateRequestConflict = new UpdateRequest("index", "type", "id");
updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values()));
updateRequestConflict.version(indexResponse.getVersion());
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync));
assertEquals(RestStatus.CONFLICT, exception.status());
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " +
"current version [2] is different than the one provided [1]]", exception.getMessage());
}
{
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));
if (randomBoolean()) {
updateRequest.parent("missing");
} else {
updateRequest.routing("missing");
}
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
});
assertEquals(RestStatus.NOT_FOUND, exception.status());
assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][id]: document missing]",
exception.getMessage());
}
{
IndexRequest indexRequest = new IndexRequest("index", "type", "with_script");
indexRequest.source(singletonMap("counter", 12));
IndexResponse indexResponse = highLevelClient().index(indexRequest);
assertEquals(RestStatus.CREATED, indexResponse.status());
UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script");
Script script = new Script(ScriptType.INLINE, "painless", "ctx._source.counter += params.count", singletonMap("count", 8));
updateRequest.script(script);
updateRequest.fetchSource(true);
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(2L, updateResponse.getVersion());
assertEquals(20, updateResponse.getGetResult().sourceAsMap().get("counter"));
}
{
IndexRequest indexRequest = new IndexRequest("index", "type", "with_doc");
indexRequest.source("field_1", "one", "field_3", "three");
indexRequest.version(12L);
indexRequest.versionType(VersionType.EXTERNAL);
IndexResponse indexResponse = highLevelClient().index(indexRequest);
assertEquals(RestStatus.CREATED, indexResponse.status());
assertEquals(12L, indexResponse.getVersion());
UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc");
updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values()));
updateRequest.fetchSource("field_*", "field_3");
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(13L, updateResponse.getVersion());
GetResult getResult = updateResponse.getGetResult();
assertEquals(13L, updateResponse.getVersion());
Map<String, Object> sourceAsMap = getResult.sourceAsMap();
assertEquals("one", sourceAsMap.get("field_1"));
assertEquals("two", sourceAsMap.get("field_2"));
assertFalse(sourceAsMap.containsKey("field_3"));
}
{
IndexRequest indexRequest = new IndexRequest("index", "type", "noop");
indexRequest.source("field", "value");
IndexResponse indexResponse = highLevelClient().index(indexRequest);
assertEquals(RestStatus.CREATED, indexResponse.status());
assertEquals(1L, indexResponse.getVersion());
UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop");
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult());
assertEquals(1L, updateResponse.getVersion());
updateRequest.detectNoop(false);
updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(2L, updateResponse.getVersion());
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_upsert");
updateRequest.upsert(singletonMap("doc_status", "created"));
updateRequest.doc(singletonMap("doc_status", "updated"));
updateRequest.fetchSource(true);
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.CREATED, updateResponse.status());
assertEquals("index", updateResponse.getIndex());
assertEquals("type", updateResponse.getType());
assertEquals("with_upsert", updateResponse.getId());
GetResult getResult = updateResponse.getGetResult();
assertEquals(1L, updateResponse.getVersion());
assertEquals("created", getResult.sourceAsMap().get("doc_status"));
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc_as_upsert");
updateRequest.doc(singletonMap("field", "initialized"));
updateRequest.fetchSource(true);
updateRequest.docAsUpsert(true);
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.CREATED, updateResponse.status());
assertEquals("index", updateResponse.getIndex());
assertEquals("type", updateResponse.getType());
assertEquals("with_doc_as_upsert", updateResponse.getId());
GetResult getResult = updateResponse.getGetResult();
assertEquals(1L, updateResponse.getVersion());
assertEquals("initialized", getResult.sourceAsMap().get("field"));
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_scripted_upsert");
updateRequest.fetchSource(true);
updateRequest.script(new Script(ScriptType.INLINE, "painless", "ctx._source.level = params.test", singletonMap("test", "C")));
updateRequest.scriptedUpsert(true);
updateRequest.upsert(singletonMap("level", "A"));
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.CREATED, updateResponse.status());
assertEquals("index", updateResponse.getIndex());
assertEquals("type", updateResponse.getType());
assertEquals("with_scripted_upsert", updateResponse.getId());
GetResult getResult = updateResponse.getGetResult();
assertEquals(1L, updateResponse.getVersion());
assertEquals("C", getResult.sourceAsMap().get("level"));
}
{
IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON));
updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML));
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
});
assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
exception.getMessage());
}
}
public void testBulk() throws IOException {
int nbItems = randomIntBetween(10, 100);
boolean[] errors = new boolean[nbItems];
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
BulkRequest bulkRequest = new BulkRequest();
for (int i = 0; i < nbItems; i++) {
String id = String.valueOf(i);
boolean erroneous = randomBoolean();
errors[i] = erroneous;
DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
if (opType == DocWriteRequest.OpType.DELETE) {
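// Seed a document only when the delete should succeed; the erroneous case targets a missing id and is reported as NOT_FOUND.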
if (erroneous == false) {
assertEquals(RestStatus.CREATED,
highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
}
DeleteRequest deleteRequest = new DeleteRequest("index", "test", id);
bulkRequest.add(deleteRequest);
} else {
BytesReference source = XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject().bytes();
if (opType == DocWriteRequest.OpType.INDEX) {
IndexRequest indexRequest = new IndexRequest("index", "test", id).source(source, xContentType);
if (erroneous) {
indexRequest.version(12L);
}
bulkRequest.add(indexRequest);
} else if (opType == DocWriteRequest.OpType.CREATE) {
IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true);
if (erroneous) {
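// Pre-indexing the same id makes the subsequent create fail with a version conflict.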
assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status());
}
bulkRequest.add(createRequest);
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = new UpdateRequest("index", "test", id)
.doc(new IndexRequest().source(source, xContentType));
if (erroneous == false) {
assertEquals(RestStatus.CREATED,
highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
}
bulkRequest.add(updateRequest);
}
}
}
BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync);
assertEquals(RestStatus.OK, bulkResponse.status());
assertTrue(bulkResponse.getTookInMillis() > 0);
assertEquals(nbItems, bulkResponse.getItems().length);
for (int i = 0; i < nbItems; i++) {
BulkItemResponse bulkItemResponse = bulkResponse.getItems()[i];
assertEquals(i, bulkItemResponse.getItemId());
assertEquals("index", bulkItemResponse.getIndex());
assertEquals("test", bulkItemResponse.getType());
assertEquals(String.valueOf(i), bulkItemResponse.getId());
DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType();
if (requestOpType == DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) {
assertEquals(errors[i], bulkItemResponse.isFailed());
assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.CREATED, bulkItemResponse.status());
} else if (requestOpType == DocWriteRequest.OpType.UPDATE) {
assertEquals(errors[i], bulkItemResponse.isFailed());
assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.OK, bulkItemResponse.status());
} else if (requestOpType == DocWriteRequest.OpType.DELETE) {
assertFalse(bulkItemResponse.isFailed());
assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
}
}
}
}


@ -22,25 +22,40 @@ package org.elasticsearch.client;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Request.enforceSameContentType;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
public class RequestTests extends ESTestCase {
public void testPing() {
@ -55,6 +70,39 @@ public class RequestTests extends ESTestCase {
getAndExistsTest(Request::get, "GET");
}
public void testDelete() throws IOException {
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String id = randomAsciiOfLengthBetween(3, 10);
DeleteRequest deleteRequest = new DeleteRequest(index, type, id);
Map<String, String> expectedParams = new HashMap<>();
setRandomTimeout(deleteRequest, expectedParams);
setRandomRefreshPolicy(deleteRequest, expectedParams);
setRandomVersion(deleteRequest, expectedParams);
setRandomVersionType(deleteRequest, expectedParams);
if (frequently()) {
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
deleteRequest.routing(routing);
expectedParams.put("routing", routing);
}
if (randomBoolean()) {
String parent = randomAsciiOfLengthBetween(3, 10);
deleteRequest.parent(parent);
expectedParams.put("parent", parent);
}
}
Request request = Request.delete(deleteRequest);
assertEquals("/" + index + "/" + type + "/" + id, request.endpoint);
assertEquals(expectedParams, request.params);
assertEquals("DELETE", request.method);
assertNull(request.entity);
}
public void testExists() {
getAndExistsTest(Request::exists, "HEAD");
}
@ -121,43 +169,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("stored_fields", storedFieldsParam.toString());
}
if (randomBoolean()) {
randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams);
}
}
Request request = requestConverter.apply(getRequest);
@ -185,33 +197,16 @@ public class RequestTests extends ESTestCase {
}
}
setRandomTimeout(indexRequest, expectedParams);
setRandomRefreshPolicy(indexRequest, expectedParams);
// There is some logic around _create endpoint and version/version type
if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
indexRequest.version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED));
expectedParams.put("version", Long.toString(Versions.MATCH_DELETED));
} else {
setRandomVersion(indexRequest, expectedParams);
setRandomVersionType(indexRequest, expectedParams);
}
if (frequently()) {
@ -230,14 +225,6 @@ public class RequestTests extends ESTestCase {
indexRequest.setPipeline(pipeline);
expectedParams.put("pipeline", pipeline);
}
}
XContentType xContentType = randomFrom(XContentType.values());
@ -271,6 +258,325 @@ public class RequestTests extends ESTestCase {
}
}
public void testUpdate() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
Map<String, String> expectedParams = new HashMap<>();
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String id = randomAsciiOfLengthBetween(3, 10);
UpdateRequest updateRequest = new UpdateRequest(index, type, id);
updateRequest.detectNoop(randomBoolean());
if (randomBoolean()) {
BytesReference source = RandomObjects.randomSource(random(), xContentType);
updateRequest.doc(new IndexRequest().source(source, xContentType));
boolean docAsUpsert = randomBoolean();
updateRequest.docAsUpsert(docAsUpsert);
if (docAsUpsert) {
expectedParams.put("doc_as_upsert", "true");
}
} else {
updateRequest.script(new Script("_value + 1"));
updateRequest.scriptedUpsert(randomBoolean());
}
if (randomBoolean()) {
BytesReference source = RandomObjects.randomSource(random(), xContentType);
updateRequest.upsert(new IndexRequest().source(source, xContentType));
}
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
updateRequest.routing(routing);
expectedParams.put("routing", routing);
}
if (randomBoolean()) {
String parent = randomAsciiOfLengthBetween(3, 10);
updateRequest.parent(parent);
expectedParams.put("parent", parent);
}
if (randomBoolean()) {
String timeout = randomTimeValue();
updateRequest.timeout(timeout);
expectedParams.put("timeout", timeout);
} else {
expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
}
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
updateRequest.setRefreshPolicy(refreshPolicy);
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
if (randomBoolean()) {
int waitForActiveShards = randomIntBetween(0, 10);
updateRequest.waitForActiveShards(waitForActiveShards);
expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
}
if (randomBoolean()) {
long version = randomLong();
updateRequest.version(version);
if (version != Versions.MATCH_ANY) {
expectedParams.put("version", Long.toString(version));
}
}
if (randomBoolean()) {
VersionType versionType = randomFrom(VersionType.values());
updateRequest.versionType(versionType);
if (versionType != VersionType.INTERNAL) {
expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
}
}
if (randomBoolean()) {
int retryOnConflict = randomIntBetween(0, 5);
updateRequest.retryOnConflict(retryOnConflict);
if (retryOnConflict > 0) {
expectedParams.put("retry_on_conflict", String.valueOf(retryOnConflict));
}
}
if (randomBoolean()) {
randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams);
}
Request request = Request.update(updateRequest);
assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.endpoint);
assertEquals(expectedParams, request.params);
assertEquals("POST", request.method);
HttpEntity entity = request.entity;
assertNotNull(entity);
assertTrue(entity instanceof ByteArrayEntity);
UpdateRequest parsedUpdateRequest = new UpdateRequest();
XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue());
try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) {
parsedUpdateRequest.fromXContent(parser);
}
assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert());
assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert());
assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop());
assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
assertEquals(updateRequest.script(), parsedUpdateRequest.script());
if (updateRequest.doc() != null) {
assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
} else {
assertNull(parsedUpdateRequest.doc());
}
if (updateRequest.upsertRequest() != null) {
assertToXContentEquivalent(updateRequest.upsertRequest().source(), parsedUpdateRequest.upsertRequest().source(), xContentType);
} else {
assertNull(parsedUpdateRequest.upsertRequest());
}
}
public void testUpdateWithDifferentContentTypes() throws IOException {
IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest();
updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML));
Request.update(updateRequest);
});
assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
exception.getMessage());
}
public void testBulk() throws IOException {
Map<String, String> expectedParams = new HashMap<>();
BulkRequest bulkRequest = new BulkRequest();
if (randomBoolean()) {
String timeout = randomTimeValue();
bulkRequest.timeout(timeout);
expectedParams.put("timeout", timeout);
} else {
expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep());
}
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
bulkRequest.setRefreshPolicy(refreshPolicy);
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
int nbItems = randomIntBetween(10, 100);
for (int i = 0; i < nbItems; i++) {
String index = randomAsciiOfLength(5);
String type = randomAsciiOfLength(5);
String id = randomAsciiOfLength(5);
BytesReference source = RandomObjects.randomSource(random(), xContentType);
DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
DocWriteRequest<?> docWriteRequest = null;
if (opType == DocWriteRequest.OpType.INDEX) {
IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType);
docWriteRequest = indexRequest;
if (randomBoolean()) {
indexRequest.setPipeline(randomAsciiOfLength(5));
}
if (randomBoolean()) {
indexRequest.parent(randomAsciiOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.CREATE) {
IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true);
docWriteRequest = createRequest;
if (randomBoolean()) {
createRequest.parent(randomAsciiOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.UPDATE) {
final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType));
docWriteRequest = updateRequest;
if (randomBoolean()) {
updateRequest.retryOnConflict(randomIntBetween(1, 5));
}
if (randomBoolean()) {
randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>());
}
if (randomBoolean()) {
updateRequest.parent(randomAsciiOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.DELETE) {
docWriteRequest = new DeleteRequest(index, type, id);
}
if (randomBoolean()) {
docWriteRequest.routing(randomAsciiOfLength(10));
}
if (randomBoolean()) {
docWriteRequest.version(randomNonNegativeLong());
}
if (randomBoolean()) {
docWriteRequest.versionType(randomFrom(VersionType.values()));
}
bulkRequest.add(docWriteRequest);
}
Request request = Request.bulk(bulkRequest);
assertEquals("/_bulk", request.endpoint);
assertEquals(expectedParams, request.params);
assertEquals("POST", request.method);
byte[] content = new byte[(int) request.entity.getContentLength()];
try (InputStream inputStream = request.entity.getContent()) {
Streams.readFully(inputStream, content);
}
BulkRequest parsedBulkRequest = new BulkRequest();
parsedBulkRequest.add(content, 0, content.length, xContentType);
assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions());
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
DocWriteRequest<?> originalRequest = bulkRequest.requests().get(i);
DocWriteRequest<?> parsedRequest = parsedBulkRequest.requests().get(i);
assertEquals(originalRequest.opType(), parsedRequest.opType());
assertEquals(originalRequest.index(), parsedRequest.index());
assertEquals(originalRequest.type(), parsedRequest.type());
assertEquals(originalRequest.id(), parsedRequest.id());
assertEquals(originalRequest.routing(), parsedRequest.routing());
assertEquals(originalRequest.parent(), parsedRequest.parent());
assertEquals(originalRequest.version(), parsedRequest.version());
assertEquals(originalRequest.versionType(), parsedRequest.versionType());
DocWriteRequest.OpType opType = originalRequest.opType();
if (opType == DocWriteRequest.OpType.INDEX) {
IndexRequest indexRequest = (IndexRequest) originalRequest;
IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest;
assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline());
assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType);
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) originalRequest;
UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest;
assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict());
assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
if (updateRequest.doc() != null) {
assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
} else {
assertNull(parsedUpdateRequest.doc());
}
}
}
}
public void testBulkWithDifferentContentTypes() throws IOException {
{
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new DeleteRequest("index", "type", "0"));
bulkRequest.add(new UpdateRequest("index", "type", "1").script(new Script("test")));
bulkRequest.add(new DeleteRequest("index", "type", "2"));
Request request = Request.bulk(bulkRequest);
assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue());
}
{
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new DeleteRequest("index", "type", "0"));
bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType));
bulkRequest.add(new DeleteRequest("index", "type", "2"));
Request request = Request.bulk(bulkRequest);
assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue());
}
{
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
UpdateRequest updateRequest = new UpdateRequest("index", "type", "0");
if (randomBoolean()) {
updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), xContentType));
} else {
updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType));
}
Request request = Request.bulk(new BulkRequest().add(updateRequest));
assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue());
}
{
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE));
bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
assertEquals("Mismatching content-type found for request with content-type [JSON], " +
"previous requests have content-type [SMILE]", exception.getMessage());
}
{
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("index", "type", "0")
.source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new IndexRequest("index", "type", "1")
.source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new UpdateRequest("index", "type", "2")
.doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
.upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))
);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
assertEquals("Mismatching content-type found for request with content-type [SMILE], " +
"previous requests have content-type [JSON]", exception.getMessage());
}
{
XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML);
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new DeleteRequest("index", "type", "0"));
bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new DeleteRequest("index", "type", "2"));
bulkRequest.add(new DeleteRequest("index", "type", "3"));
bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
assertEquals("Unsupported content-type found for request with content-type [" + xContentType
+ "], only JSON and SMILE are supported", exception.getMessage());
}
}
public void testParams() {
final int nbParams = randomIntBetween(0, 10);
Request.Params params = Request.Params.builder();
@ -306,5 +612,117 @@ public class RequestTests extends ESTestCase {
assertEquals("/a/b", Request.endpoint("a", "b"));
assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create"));
assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create"));
assertEquals("/a/_create", Request.endpoint("a", null, null, "_create"));
}
public void testEnforceSameContentType() {
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType);
assertEquals(xContentType, enforceSameContentType(indexRequest, null));
assertEquals(xContentType, enforceSameContentType(indexRequest, xContentType));
XContentType bulkContentType = randomBoolean() ? xContentType : null;
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType));
assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported",
exception.getMessage());
exception = expectThrows(IllegalArgumentException.class, () ->
enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType));
assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported",
exception.getMessage());
XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON;
exception = expectThrows(IllegalArgumentException.class, () ->
enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType));
assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], "
+ "previous requests have content-type [" + xContentType + "]", exception.getMessage());
}
/**
* Randomize the {@link FetchSourceContext} request parameters.
*/
private static void randomizeFetchSourceContextParams(Consumer<FetchSourceContext> consumer, Map<String, String> expectedParams) {
if (randomBoolean()) {
if (randomBoolean()) {
boolean fetchSource = randomBoolean();
consumer.accept(new FetchSourceContext(fetchSource));
if (fetchSource == false) {
expectedParams.put("_source", "false");
}
} else {
int numIncludes = randomIntBetween(0, 5);
String[] includes = new String[numIncludes];
StringBuilder includesParam = new StringBuilder();
for (int i = 0; i < numIncludes; i++) {
String include = randomAsciiOfLengthBetween(3, 10);
includes[i] = include;
includesParam.append(include);
if (i < numIncludes - 1) {
includesParam.append(",");
}
}
if (numIncludes > 0) {
expectedParams.put("_source_include", includesParam.toString());
}
int numExcludes = randomIntBetween(0, 5);
String[] excludes = new String[numExcludes];
StringBuilder excludesParam = new StringBuilder();
for (int i = 0; i < numExcludes; i++) {
String exclude = randomAsciiOfLengthBetween(3, 10);
excludes[i] = exclude;
excludesParam.append(exclude);
if (i < numExcludes - 1) {
excludesParam.append(",");
}
}
if (numExcludes > 0) {
expectedParams.put("_source_exclude", excludesParam.toString());
}
consumer.accept(new FetchSourceContext(true, includes, excludes));
}
}
}
private static void setRandomTimeout(ReplicationRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
String timeout = randomTimeValue();
request.timeout(timeout);
expectedParams.put("timeout", timeout);
} else {
expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
}
}
private static void setRandomRefreshPolicy(ReplicatedWriteRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
request.setRefreshPolicy(refreshPolicy);
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
}
private static void setRandomVersion(DocWriteRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
long version = randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, Versions.NOT_FOUND, randomNonNegativeLong());
request.version(version);
if (version != Versions.MATCH_ANY) {
expectedParams.put("version", Long.toString(version));
}
}
}
private static void setRandomVersionType(DocWriteRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
VersionType versionType = randomFrom(VersionType.values());
request.versionType(versionType);
if (versionType != VersionType.INTERNAL) {
expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
}
}
}
}


@ -0,0 +1,138 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Mockito.mock;
/**
* This test works against a {@link RestHighLevelClient} subclass that simulates how custom response sections returned by
* Elasticsearch plugins can be parsed using the high-level client.
*/
public class RestHighLevelClientExtTests extends ESTestCase {
private RestHighLevelClient restHighLevelClient;
@Before
public void initClient() throws IOException {
RestClient restClient = mock(RestClient.class);
restHighLevelClient = new RestHighLevelClientExt(restClient);
}
public void testParseEntityCustomResponseSection() throws IOException {
{
HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON);
BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent);
assertThat(customSection, instanceOf(CustomResponseSection1.class));
CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection;
assertEquals("value", customResponseSection1.value);
}
{
HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON);
BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent);
assertThat(customSection, instanceOf(CustomResponseSection2.class));
CustomResponseSection2 customResponseSection2 = (CustomResponseSection2) customSection;
assertArrayEquals(new String[]{"item1", "item2"}, customResponseSection2.values);
}
}
private static class RestHighLevelClientExt extends RestHighLevelClient {
private RestHighLevelClientExt(RestClient restClient) {
super(restClient, getNamedXContentsExt());
}
private static List<NamedXContentRegistry.Entry> getNamedXContentsExt() {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
entries.add(new NamedXContentRegistry.Entry(BaseCustomResponseSection.class, new ParseField("custom1"),
CustomResponseSection1::fromXContent));
entries.add(new NamedXContentRegistry.Entry(BaseCustomResponseSection.class, new ParseField("custom2"),
CustomResponseSection2::fromXContent));
return entries;
}
}
private abstract static class BaseCustomResponseSection {
static BaseCustomResponseSection fromXContent(XContentParser parser) throws IOException {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
BaseCustomResponseSection custom = parser.namedObject(BaseCustomResponseSection.class, parser.currentName(), null);
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
return custom;
}
}
private static class CustomResponseSection1 extends BaseCustomResponseSection {
private final String value;
private CustomResponseSection1(String value) {
this.value = value;
}
static CustomResponseSection1 fromXContent(XContentParser parser) throws IOException {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("field", parser.currentName());
assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());
CustomResponseSection1 responseSection1 = new CustomResponseSection1(parser.text());
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
return responseSection1;
}
}
private static class CustomResponseSection2 extends BaseCustomResponseSection {
private final String[] values;
private CustomResponseSection2(String[] values) {
this.values = values;
}
static CustomResponseSection2 fromXContent(XContentParser parser) throws IOException {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("array", parser.currentName());
assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken());
List<String> values = new ArrayList<>();
while (parser.nextToken().isValue()) {
values.add(parser.text());
}
assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken());
CustomResponseSection2 responseSection2 = new CustomResponseSection2(values.toArray(new String[values.size()]));
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
return responseSection2;
}
}
}


@ -27,7 +27,6 @@ import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
@ -40,6 +39,7 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
@ -55,9 +55,9 @@ import org.mockito.internal.matchers.VarargMatcher;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
@ -139,17 +139,17 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testParseEntity() throws IOException {
{
IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(null, null));
assertEquals("Response body expected but not returned", ise.getMessage());
}
{
IllegalStateException ise = expectThrows(IllegalStateException.class,
() -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null));
assertEquals("Elasticsearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage());
}
{
StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML);
IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null));
assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage());
}
{
@ -162,13 +162,13 @@ public class RestHighLevelClientTests extends ESTestCase {
return value;
};
HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);
assertEquals("value", RestHighLevelClient.parseEntity(jsonEntity, entityParser));
assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser));
HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml"));
assertEquals("value", RestHighLevelClient.parseEntity(yamlEntity, entityParser));
assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser));
HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile"));
assertEquals("value", RestHighLevelClient.parseEntity(smileEntity, entityParser));
assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser));
HttpEntity cborEntity = createBinaryEntity(CborXContent.contentBuilder(), ContentType.create("application/cbor"));
assertEquals("value", RestHighLevelClient.parseEntity(cborEntity, entityParser));
assertEquals("value", restHighLevelClient.parseEntity(cborEntity, entityParser));
}
}
@ -195,7 +195,7 @@ public class RestHighLevelClientTests extends ESTestCase {
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(response);
ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException);
assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
assertEquals(restStatus, elasticsearchException.status());
assertSame(responseException, elasticsearchException.getCause());
@ -207,7 +207,7 @@ public class RestHighLevelClientTests extends ESTestCase {
ContentType.APPLICATION_JSON));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(response);
ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException);
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
assertEquals(restStatus, elasticsearchException.status());
assertSame(responseException, elasticsearchException.getSuppressed()[0]);
@ -218,7 +218,7 @@ public class RestHighLevelClientTests extends ESTestCase {
httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(response);
ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException);
assertEquals("Unable to parse response body", elasticsearchException.getMessage());
assertEquals(restStatus, elasticsearchException.status());
assertSame(responseException, elasticsearchException.getCause());
@ -230,7 +230,7 @@ public class RestHighLevelClientTests extends ESTestCase {
httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(response);
ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException);
ElasticsearchException elasticsearchException = restHighLevelClient.parseResponseException(responseException);
assertEquals("Unable to parse response body", elasticsearchException.getMessage());
assertEquals(restStatus, elasticsearchException.status());
assertSame(responseException, elasticsearchException.getCause());
@ -240,7 +240,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnSuccess() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
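Each of these converter hunks swaps java.util.function.Function for CheckedFunction so the lambda may throw IOException. A minimal sketch of such an interface; the real one ships with Elasticsearch, so its exact package and shape are an assumption here:

    @FunctionalInterface
    interface CheckedFunction<T, R, E extends Exception> {
        // like Function#apply, but permitted to throw the checked exception E
        R apply(T t) throws E;
    }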
@ -261,7 +262,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
@ -278,7 +280,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
@ -297,7 +300,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
@ -316,7 +320,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
@ -335,7 +340,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
@ -348,7 +354,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
@ -364,7 +371,8 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
MainRequest mainRequest = new MainRequest();
Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request("GET", "/", Collections.emptyMap(), null);
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
ContentType.APPLICATION_JSON));
@ -383,7 +391,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnSuccess() throws IOException {
{
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -393,7 +401,7 @@ public class RestHighLevelClientTests extends ESTestCase {
}
{
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> {throw new IllegalStateException();}, trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -408,7 +416,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnException() throws IOException {
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
IllegalStateException exception = new IllegalStateException();
responseListener.onFailure(exception);
@ -417,7 +425,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IOException {
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -433,7 +441,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOException {
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -452,7 +460,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws IOException {
{
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -469,7 +477,7 @@ public class RestHighLevelClientTests extends ESTestCase {
}
{
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
@ -488,7 +496,7 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOException {
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.singleton(404));
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
@ -503,7 +511,7 @@ public class RestHighLevelClientTests extends ESTestCase {
TrackingActionListener trackingActionListener = new TrackingActionListener();
//response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying
//to parse a 404 response which contains an error rather than a valid document not found response.
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404));
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
@ -520,7 +528,7 @@ public class RestHighLevelClientTests extends ESTestCase {
TrackingActionListener trackingActionListener = new TrackingActionListener();
//response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying
//to parse a 404 response which contains an error rather than a valid document not found response.
ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404));
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
@ -535,6 +543,11 @@ public class RestHighLevelClientTests extends ESTestCase {
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
}
public void testNamedXContents() throws IOException {
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getNamedXContents();
assertEquals(0, namedXContents.size());
}
private static class TrackingActionListener implements ActionListener<Integer> {
private final AtomicInteger statusCode = new AtomicInteger(-1);
private final AtomicReference<Exception> exception = new AtomicReference<>();

View File

@ -49,8 +49,9 @@ dependencies {
}
forbiddenApisMain {
//client does not depend on core, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
//client does not depend on core, so only jdk and http signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}
forbiddenApisTest {
@ -58,7 +59,8 @@ forbiddenApisTest {
bundledSignatures -= 'jdk-non-portable'
bundledSignatures += 'jdk-internal'
//client does not depend on core, so only jdk signatures should be checked
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/http-signatures.txt')]
}
dependencyLicenses {
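Both forbidden-apis blocks now also load http-signatures.txt. Its contents are not part of this diff; an entry in the forbidden-apis signatures format plausibly looks like the following (signature and message illustrative):

    # ban an HttpEntity constructor that leaves the ContentType unset
    org.apache.http.entity.StringEntity#<init>(java.lang.String) @ specify a ContentType explicitly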

View File

@ -20,6 +20,7 @@
package org.elasticsearch.client;
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
@ -32,6 +33,8 @@ import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.protocol.HttpContext;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
@ -56,7 +59,7 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
httpResponse.setEntity(new StringEntity("test"));
httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN));
//everything goes well
consumer.responseReceived(httpResponse);
@ -99,11 +102,17 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
consumer.onResponseReceived(new BasicHttpResponse(statusLine));
BasicHttpEntity entity = new BasicHttpEntity();
entity.setContentLength(randomInt(bufferLimit));
final AtomicReference<Long> contentLength = new AtomicReference<>();
HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) {
@Override
public long getContentLength() {
return contentLength.get();
}
};
contentLength.set(randomLong(bufferLimit));
consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
entity.setContentLength(randomIntBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
try {
consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
} catch(ContentTooLongException e) {
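BasicHttpEntity#setContentLength disappears here, presumably because entities built without a ContentType now trip the forbidden-apis check; the replacement steers the reported length through an anonymous subclass. The same pattern in isolation, values illustrative:

    final AtomicReference<Long> length = new AtomicReference<>(0L);
    HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) {
        @Override
        public long getContentLength() {
            // report whatever the test last requested, not the empty body's size
            return length.get();
        }
    };
    length.set(1L);             // within the buffer limit: accepted
    length.set(Long.MAX_VALUE); // beyond the limit: ContentTooLongException expected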

View File

@ -31,6 +31,7 @@ import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
@ -71,20 +72,21 @@ public class RequestLoggerTests extends RestClientTestCase {
HttpEntity entity;
switch(randomIntBetween(0, 4)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
break;
case 1:
entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)),
ContentType.APPLICATION_JSON);
break;
case 2:
entity = new NStringEntity(requestBody, StandardCharsets.UTF_8);
entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
break;
case 3:
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
break;
case 4:
// Evil entity without a charset
entity = new StringEntity(requestBody, (Charset) null);
entity = new StringEntity(requestBody, ContentType.create("application/json", (Charset) null));
break;
default:
throw new UnsupportedOperationException();
@ -122,15 +124,16 @@ public class RequestLoggerTests extends RestClientTestCase {
HttpEntity entity;
switch(randomIntBetween(0, 2)) {
case 0:
entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON);
break;
case 1:
//test a non repeatable entity
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)),
ContentType.APPLICATION_JSON);
break;
case 2:
// Evil entity without a charset
entity = new StringEntity(responseBody, (Charset) null);
entity = new StringEntity(responseBody, ContentType.create("application/json", (Charset) null));
break;
default:
throw new UnsupportedOperationException();
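Every branch above now names its ContentType explicitly. A compact before/after for the plain string case; the charset-only constructor is exactly the kind of call the new http signatures forbid:

    // before: charset only, the content type silently defaults to text/plain
    HttpEntity before = new StringEntity("{\"field\":\"value\"}", StandardCharsets.UTF_8);
    // after: the type (and its charset) travel with the entity
    HttpEntity after = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);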

View File

@ -25,6 +25,7 @@ import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
@ -52,10 +53,11 @@ public class ResponseExceptionTests extends RestClientTestCase {
if (hasBody) {
HttpEntity entity;
if (getRandom().nextBoolean()) {
entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON);
} else {
//test a non repeatable entity
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)),
ContentType.APPLICATION_JSON);
}
httpResponse.setEntity(entity);
}

View File

@ -28,6 +28,7 @@ import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
@ -249,7 +250,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
private Response bodyTest(final RestClient restClient, final String method) throws IOException {
String requestBody = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(requestBody);
StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
int statusCode = randomStatusCode(getRandom());
Response esResponse;
try {

View File

@ -38,6 +38,7 @@ import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
@ -293,7 +294,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
*/
public void testBody() throws IOException {
String body = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(body);
StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON);
for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
for (int okStatusCode : getOkStatusCodes()) {
Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity);
@ -431,7 +432,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
HttpEntity entity = null;
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
if (hasBody) {
entity = new StringEntity(randomAsciiOfLengthBetween(10, 100));
entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON);
((HttpEntityEnclosingRequest) request).setEntity(entity);
}

View File

@ -74,7 +74,7 @@ dependencies {
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// percentile ranks aggregation
compile 'org.hdrhistogram:HdrHistogram:2.1.6'
compile 'org.hdrhistogram:HdrHistogram:2.1.9'
// lucene spatial
compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional

View File

@ -1 +0,0 @@
7495feb7f71ee124bd2a7e7d83590e296d71d80e

View File

@ -0,0 +1 @@
e4631ce165eb400edecfa32e03d3f1be53dee754
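The two single-line file diffs above appear to be the sha1 checksum files consumed by the dependencyLicenses check: the hash recorded for HdrHistogram 2.1.6 is deleted and the one for 2.1.9 added alongside the version bump.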

View File

@ -304,7 +304,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
* {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
* if needed and then immediately returns.
*/
protected static void parseInnerToXContent(XContentParser parser, DocWriteResponseBuilder context) throws IOException {
protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
@ -348,9 +348,11 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
}
/**
* {@link DocWriteResponseBuilder} is used to build {@link DocWriteResponse} objects during XContent parsing.
* Base class of all {@link DocWriteResponse} builders. These {@link DocWriteResponse.Builder} are used during
* xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to
* instantiate the appropriate {@link DocWriteResponse} with the parsed values.
*/
public abstract static class DocWriteResponseBuilder {
public abstract static class Builder {
protected ShardId shardId = null;
protected String type = null;
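Per the rewritten javadoc, parsing accumulates fields on a Builder and build() materializes the response. A sketch of that flow using names that appear later in this diff (the loop structure is an assumption):

    IndexResponse.Builder builder = new IndexResponse.Builder();
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        // each call consumes the current field and stores it on the builder
        IndexResponse.parseXContentFields(parser, builder);
    }
    IndexResponse response = builder.build();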

View File

@ -139,19 +139,6 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
return this;
}
/**
* Sets the repository settings.
*
* @param source repository settings in json or yaml format
* @return this request
* @deprecated use {@link #settings(String, XContentType)} to avoid content type auto-detection
*/
@Deprecated
public PutRepositoryRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets the repository settings.
*
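The deleted overload sniffed whether the source was JSON or YAML; callers now pass the type, as the removed @deprecated note itself advised. A minimal before/after, settings body illustrative:

    // before (removed): format detected from the string
    request.settings("{\"location\": \"/mount/backups\"}");
    // after: format stated by the caller
    request.settings("{\"location\": \"/mount/backups\"}", XContentType.JSON);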

View File

@ -89,19 +89,6 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutR
return this;
}
/**
* Sets the repository settings in Json or Yaml format
*
* @param source repository settings
* @return this builder
* @deprecated use {@link #setSettings(String, XContentType)} instead to avoid content type auto detection
*/
@Deprecated
public PutRepositoryRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets the repository settings in Json or Yaml format
*

View File

@ -81,16 +81,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
* @deprecated use {@link #transientSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public ClusterUpdateSettingsRequest transientSettings(String source) {
this.transientSettings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
*/
@ -130,16 +120,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
return this;
}
/**
* Sets the source containing the persistent settings to be updated. They will get applied cross restarts
* @deprecated use {@link #persistentSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public ClusterUpdateSettingsRequest persistentSettings(String source) {
this.persistentSettings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets the source containing the persistent settings to be updated. They will get applied cross restarts
*/

View File

@ -51,16 +51,6 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
* @deprecated use {@link #setTransientSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public ClusterUpdateSettingsRequestBuilder setTransientSettings(String settings) {
request.transientSettings(settings);
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
*/
@ -93,16 +83,6 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil
return this;
}
/**
* Sets the source containing the persistent settings to be updated. They will get applied cross restarts
* @deprecated use {@link #setPersistentSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public ClusterUpdateSettingsRequestBuilder setPersistentSettings(String settings) {
request.persistentSettings(settings);
return this;
}
/**
* Sets the source containing the persistent settings to be updated. They will get applied cross restarts
*/

View File

@ -287,21 +287,6 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
return this;
}
/**
* Sets repository-specific snapshot settings in JSON or YAML format
* <p>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this request
* @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public CreateSnapshotRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets repository-specific snapshot settings in JSON or YAML format
* <p>

View File

@ -141,21 +141,6 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil
return this;
}
/**
* Sets repository-specific snapshot settings in YAML, JSON or properties format
* <p>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
* @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public CreateSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific snapshot settings in YAML or JSON format
* <p>

View File

@ -312,21 +312,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
return this;
}
/**
* Sets repository-specific restore settings in JSON or YAML format
* <p>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this request
* @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public RestoreSnapshotRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets repository-specific restore settings in JSON or YAML format
* <p>
@ -450,16 +435,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
return this;
}
/**
* Sets settings that should be added/changed in all restored indices
* @deprecated use {@link #indexSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public RestoreSnapshotRequest indexSettings(String source) {
this.indexSettings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets settings that should be added/changed in all restored indices
*/

View File

@ -153,21 +153,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui
return this;
}
/**
* Sets repository-specific restore settings in JSON or YAML format
* <p>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
* @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public RestoreSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific restore settings in JSON or YAML format
* <p>
@ -263,19 +248,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui
return this;
}
/**
* Sets index settings that should be added or replaced during restore
*
* @param source index settings
* @return this builder
* @deprecated use {@link #setIndexSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public RestoreSnapshotRequestBuilder setIndexSettings(String source) {
request.indexSettings(source);
return this;
}
/**
* Sets index settings that should be added or replaced during restore
*

View File

@ -45,11 +45,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
super();
}
@Deprecated
public PutStoredScriptRequest(String id, String lang, BytesReference content) {
this(id, lang, content, XContentFactory.xContentType(content));
}
public PutStoredScriptRequest(String id, String lang, BytesReference content, XContentType xContentType) {
super();
this.id = id;
@ -107,15 +102,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
return xContentType;
}
/**
* Set the script source using bytes.
* @deprecated this method is deprecated as it relies on content type detection. Use {@link #content(BytesReference, XContentType)}
*/
@Deprecated
public PutStoredScriptRequest content(BytesReference content) {
return content(content, XContentFactory.xContentType(content));
}
/**
* Set the script source and the content type of the bytes.
*/
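The same migration applies to byte content: the detecting content(BytesReference) overload is gone and only the explicit variant survives. A sketch, script body illustrative:

    BytesReference source = new BytesArray("{\"script\": \"ctx._source.counter += 1\"}");
    // the caller declares the format instead of XContentFactory.xContentType(content)
    request.content(source, XContentType.JSON);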

View File

@ -36,16 +36,6 @@ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<Pu
return this;
}
/**
* Set the source of the script.
* @deprecated this method requires content type detection. Use {@link #setContent(BytesReference, XContentType)} instead
*/
@Deprecated
public PutStoredScriptRequestBuilder setContent(BytesReference content) {
request.content(content);
return this;
}
/**
* Set the source of the script along with the content type of the source
*/

View File

@ -170,16 +170,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this;
}
/**
* The settings to create the index with (either json or yaml format)
* @deprecated use {@link #source(String, XContentType)} instead to avoid content type detection
*/
@Deprecated
public CreateIndexRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* The settings to create the index with (either json or yaml format)
*/
@ -215,18 +205,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this;
}
/**
* Adds mapping that will be added when the index gets created.
*
* @param type The mapping type
* @param source The mapping source
* @deprecated use {@link #mapping(String, String, XContentType)} to avoid content type detection
*/
@Deprecated
public CreateIndexRequest mapping(String type, String source) {
return mapping(type, new BytesArray(source), XContentFactory.xContentType(source));
}
/**
* Adds mapping that will be added when the index gets created.
*
@ -362,15 +340,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this;
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #source(String, XContentType)}
*/
@Deprecated
public CreateIndexRequest source(String source) {
return source(new BytesArray(source));
}
/**
* Sets the settings and mappings as a single source.
*/
@ -382,16 +351,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
* Sets the settings and mappings as a single source.
*/
public CreateIndexRequest source(XContentBuilder source) {
return source(source.bytes());
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #source(byte[], XContentType)}
*/
@Deprecated
public CreateIndexRequest source(byte[] source) {
return source(source, 0, source.length);
return source(source.bytes(), source.contentType());
}
/**
@ -401,15 +361,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return source(source, 0, source.length, xContentType);
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #source(byte[], int, int, XContentType)}
*/
@Deprecated
public CreateIndexRequest source(byte[] source, int offset, int length) {
return source(new BytesArray(source, offset, length));
}
/**
* Sets the settings and mappings as a single source.
*/
@ -417,17 +368,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return source(new BytesArray(source, offset, length), xContentType);
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #source(BytesReference, XContentType)}
*/
@Deprecated
public CreateIndexRequest source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
source(source, xContentType);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/
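Note the surviving XContentBuilder overload now forwards the builder's own content type along with its bytes, so no detection is needed downstream. A usage sketch, index name and settings illustrative:

    XContentBuilder builder = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("settings").field("number_of_shards", 1).endObject()
        .endObject();
    // the builder knows it produced JSON; source(XContentBuilder) passes that along
    CreateIndexRequest request = new CreateIndexRequest("my-index").source(builder);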

View File

@ -110,19 +110,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Adds mapping that will be added when the index gets created.
*
* @param type The mapping type
* @param source The mapping source
* @deprecated use {@link #addMapping(String, String, XContentType)} to avoid content type auto-detection
*/
@Deprecated
public CreateIndexRequestBuilder addMapping(String type, String source) {
request.mapping(type, source);
return this;
}
/**
* Adds mapping that will be added when the index gets created.
*
@ -214,16 +201,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #setSource(String, XContentType)}
*/
@Deprecated
public CreateIndexRequestBuilder setSource(String source) {
request.source(source);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/
@ -232,16 +209,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #setSource(BytesReference, XContentType)}
*/
@Deprecated
public CreateIndexRequestBuilder setSource(BytesReference source) {
request.source(source);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/
@ -250,16 +217,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #setSource(byte[], XContentType)}
*/
@Deprecated
public CreateIndexRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/
@ -268,16 +225,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Sets the settings and mappings as a single source.
* @deprecated use {@link #setSource(byte[], int, int, XContentType)}
*/
@Deprecated
public CreateIndexRequestBuilder setSource(byte[] source, int offset, int length) {
request.source(source, offset, length);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/

View File

@ -270,15 +270,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
}
}
/**
* The mapping source definition.
* @deprecated use {@link #source(String, XContentType)}
*/
@Deprecated
public PutMappingRequest source(String mappingSource) {
return source(mappingSource, XContentFactory.xContentType(mappingSource));
}
/**
* The mapping source definition.
*/

View File

@ -81,16 +81,6 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
/**
* The mapping source definition.
* @deprecated use {@link #setSource(String, XContentType)}
*/
@Deprecated
public PutMappingRequestBuilder setSource(String mappingSource) {
request.source(mappingSource);
return this;
}
/**
* The mapping source definition.
*/

View File

@ -120,16 +120,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
* @deprecated use {@link #settings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public UpdateSettingsRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
*/

View File

@ -70,16 +70,6 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<Upd
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
* @deprecated use {@link #setSettings(String, XContentType)} to avoid content type detection
*/
@Deprecated
public UpdateSettingsRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
*/

View File

@ -180,16 +180,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return this;
}
/**
* The settings to create the index template with (either json/yaml format).
* @deprecated use {@link #settings(String, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest settings(String source) {
this.settings = Settings.builder().loadFromSource(source).build();
return this;
}
/**
* The settings to create the index template with (either json/yaml format).
*/
@ -216,19 +206,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return this.settings;
}
/**
* Adds mapping that will be added when the index gets created.
*
* @param type The mapping type
* @param source The mapping source
* @deprecated use {@link #mapping(String, String, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest mapping(String type, String source) {
XContentType xContentType = XContentFactory.xContentType(source);
return mapping(type, source, xContentType);
}
/**
* Adds mapping that will be added when the index gets created.
*
@ -385,15 +362,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return this;
}
/**
* The template source definition.
* @deprecated use {@link #source(String, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest source(String templateSource) {
return source(XContentHelper.convertToMap(XContentFactory.xContent(templateSource), templateSource, true));
}
/**
* The template source definition.
*/
@ -401,15 +369,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return source(XContentHelper.convertToMap(xContentType.xContent(), templateSource, true));
}
/**
* The template source definition.
* @deprecated use {@link #source(byte[], XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* The template source definition.
*/
@ -417,15 +376,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return source(source, 0, source.length, xContentType);
}
/**
* The template source definition.
* @deprecated use {@link #source(byte[], int, int, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
return source(new BytesArray(source, offset, length));
}
/**
* The template source definition.
*/
@ -433,15 +383,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return source(new BytesArray(source, offset, length), xContentType);
}
/**
* The template source definition.
* @deprecated use {@link #source(BytesReference, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequest source(BytesReference source) {
return source(XContentHelper.convertToMap(source, true).v2());
}
/**
* The template source definition.
*/
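Mappings follow the identical pattern: the type-sniffing mapping(String, String) is removed and the explicit overload remains. A sketch, template name and mapping illustrative:

    PutIndexTemplateRequest template = new PutIndexTemplateRequest("template_1");
    // the mapping string is declared JSON rather than sniffed
    template.mapping("doc",
        "{\"doc\":{\"properties\":{\"field\":{\"type\":\"keyword\"}}}}",
        XContentType.JSON);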

View File

@ -100,16 +100,6 @@ public class PutIndexTemplateRequestBuilder
return this;
}
/**
* The settings to create the index template with (either json or yaml format)
* @deprecated use {@link #setSettings(String, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* The settings to create the index template with (either json or yaml format)
*/
@ -126,19 +116,6 @@ public class PutIndexTemplateRequestBuilder
return this;
}
/**
* Adds mapping that will be added when the index template gets created.
*
* @param type The mapping type
* @param source The mapping source
* @deprecated use {@link #addMapping(String, String, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder addMapping(String type, String source) {
request.mapping(type, source);
return this;
}
/**
* Adds mapping that will be added when the index template gets created.
*
@ -249,16 +226,6 @@ public class PutIndexTemplateRequestBuilder
return this;
}
/**
* The template source definition.
* @deprecated use {@link #setSource(BytesReference, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder setSource(String templateSource) {
request.source(templateSource);
return this;
}
/**
* The template source definition.
*/
@ -267,26 +234,6 @@ public class PutIndexTemplateRequestBuilder
return this;
}
/**
* The template source definition.
* @deprecated use {@link #setSource(BytesReference, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource) {
request.source(templateSource);
return this;
}
/**
* The template source definition.
* @deprecated use {@link #setSource(byte[], XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder setSource(byte[] templateSource) {
request.source(templateSource);
return this;
}
/**
* The template source definition.
*/
@ -295,16 +242,6 @@ public class PutIndexTemplateRequestBuilder
return this;
}
/**
* The template source definition.
* @deprecated use {@link #setSource(byte[], int, int, XContentType)}
*/
@Deprecated
public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, int offset, int length) {
request.source(templateSource, offset, length);
return this;
}
/**
* The template source definition.
*/

View File

@ -19,7 +19,9 @@
package org.elasticsearch.action.bulk;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@ -31,13 +33,12 @@ public class BulkItemRequest implements Streamable {
private int id;
private DocWriteRequest request;
private volatile BulkItemResponse primaryResponse;
private volatile boolean ignoreOnReplica;
BulkItemRequest() {
}
public BulkItemRequest(int id, DocWriteRequest request) {
protected BulkItemRequest(int id, DocWriteRequest request) {
this.id = id;
this.request = request;
}
@ -55,25 +56,16 @@ public class BulkItemRequest implements Streamable {
return request.indices()[0];
}
BulkItemResponse getPrimaryResponse() {
// NOTE: protected for testing only
protected BulkItemResponse getPrimaryResponse() {
return primaryResponse;
}
void setPrimaryResponse(BulkItemResponse primaryResponse) {
// NOTE: protected for testing only
protected void setPrimaryResponse(BulkItemResponse primaryResponse) {
this.primaryResponse = primaryResponse;
}
/**
* Marks this request to be ignored and *not* execute on a replica.
*/
void setIgnoreOnReplica() {
this.ignoreOnReplica = true;
}
boolean isIgnoreOnReplica() {
return ignoreOnReplica;
}
public static BulkItemRequest readBulkItem(StreamInput in) throws IOException {
BulkItemRequest item = new BulkItemRequest();
item.readFrom(in);
@ -87,14 +79,37 @@ public class BulkItemRequest implements Streamable {
if (in.readBoolean()) {
primaryResponse = BulkItemResponse.readBulkItem(in);
}
ignoreOnReplica = in.readBoolean();
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
boolean ignoreOnReplica = in.readBoolean();
if (ignoreOnReplica == false && primaryResponse != null) {
assert primaryResponse.isFailed() == false : "expected no failure on the primary response";
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
DocWriteRequest.writeDocumentRequest(out, request);
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
// old nodes expect updated version and version type on the request
if (primaryResponse != null) {
request.version(primaryResponse.getVersion());
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
DocWriteRequest.writeDocumentRequest(out, request);
} else {
DocWriteRequest.writeDocumentRequest(out, request);
}
} else {
DocWriteRequest.writeDocumentRequest(out, request);
}
out.writeOptionalStreamable(primaryResponse);
out.writeBoolean(ignoreOnReplica);
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
if (primaryResponse != null) {
out.writeBoolean(primaryResponse.isFailed()
|| primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP);
} else {
out.writeBoolean(false);
}
}
}
}
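The serialization above keeps pre-6.0 nodes happy by synthesizing the fields they still expect. The general shape of such version-gated wire code, reduced to a sketch (the derived boolean stands in for the removed ignoreOnReplica flag):

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(id);
        DocWriteRequest.writeDocumentRequest(out, request);
        out.writeOptionalStreamable(primaryResponse);
        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
            // old nodes still read a boolean here, so derive it from the response
            out.writeBoolean(primaryResponse != null && primaryResponse.isFailed());
        }
    }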

View File

@ -40,7 +40,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.function.Supplier;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
@ -102,21 +101,21 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
final OpType opType = OpType.fromString(currentFieldName);
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
DocWriteResponse.DocWriteResponseBuilder builder = null;
DocWriteResponse.Builder builder = null;
CheckedConsumer<XContentParser, IOException> itemParser = null;
if (opType == OpType.INDEX || opType == OpType.CREATE) {
final IndexResponse.IndexResponseBuilder indexResponseBuilder = new IndexResponse.IndexResponseBuilder();
final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
builder = indexResponseBuilder;
itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);
} else if (opType == OpType.UPDATE) {
final UpdateResponse.UpdateResponseBuilder updateResponseBuilder = new UpdateResponse.UpdateResponseBuilder();
final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
builder = updateResponseBuilder;
itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);
} else if (opType == OpType.DELETE) {
final DeleteResponse.DeleteResponseBuilder deleteResponseBuilder = new DeleteResponse.DeleteResponseBuilder();
final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
builder = deleteResponseBuilder;
itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
} else {

View File

@ -289,15 +289,6 @@ public class BulkProcessor implements Closeable {
executeIfNeeded();
}
/**
* Adds the data from the bytes to be processed by the bulk processor
* @deprecated use {@link #add(BytesReference, String, String, XContentType)} instead to avoid content type auto-detection
*/
@Deprecated
public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
return add(data, defaultIndex, defaultType, null, null);
}
/**
* Adds the data from the bytes to be processed by the bulk processor
*/
@ -306,19 +297,6 @@ public class BulkProcessor implements Closeable {
return add(data, defaultIndex, defaultType, null, null, xContentType);
}
/**
* Adds the data from the bytes to be processed by the bulk processor
* @deprecated use {@link #add(BytesReference, String, String, String, Object, XContentType)} instead to avoid content type
* auto-detection
*/
@Deprecated
public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
@Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
executeIfNeeded();
return this;
}
/**
* Adds the data from the bytes to be processed by the bulk processor
*/

View File

@ -243,15 +243,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
return sizeInBytes;
}
/**
* Adds a framed data in binary format
* @deprecated use {@link #add(byte[], int, int, XContentType)}
*/
@Deprecated
public BulkRequest add(byte[] data, int from, int length) throws IOException {
return add(data, from, length, null, null);
}
/**
* Adds a framed data in binary format
*/
@ -259,15 +250,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
return add(data, from, length, null, null, xContentType);
}
/**
* Adds a framed data in binary format
* @deprecated use {@link #add(byte[], int, int, String, String, XContentType)}
*/
@Deprecated
public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException {
return add(new BytesArray(data, from, length), defaultIndex, defaultType);
}
/**
* Adds a framed data in binary format
*/
@ -276,16 +258,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType);
}
/**
* Adds a framed data in binary format
*
* @deprecated use {@link #add(BytesReference, String, String, XContentType)}
*/
@Deprecated
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException {
return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
}
/**
* Adds a framed data in binary format
*/
@ -294,16 +266,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
return add(data, defaultIndex, defaultType, null, null, null, null, null, true, xContentType);
}
/**
* Adds a framed data in binary format
*
* @deprecated use {@link #add(BytesReference, String, String, boolean, XContentType)}
*/
@Deprecated
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws IOException {
return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
}
/**
* Adds a framed data in binary format
*/
@ -312,13 +274,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex, xContentType);
}
@Deprecated
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws IOException {
XContentType xContentType = XContentFactory.xContentType(data);
return add(data, defaultIndex, defaultType, defaultRouting, defaultFields, defaultFetchSourceContext, defaultPipeline, payload,
allowExplicitIndex, xContentType);
}
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String
defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String
defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException {
@ -432,7 +387,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
}
line++;
// order is important, we set parent after routing, so routing will be set to parent if not set explicitly
// we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks
// of index request.
if ("index".equals(action)) {

View File

@ -96,16 +96,6 @@ public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkRe
return this;
}
/**
* Adds a framed data in binary format
* @deprecated use {@link #add(byte[], int, int, XContentType)}
*/
@Deprecated
public BulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
request.add(data, from, length, null, null);
return this;
}
/**
* Adds a framed data in binary format
*/
@ -114,16 +104,6 @@ public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkRe
return this;
}
/**
* Adds a framed data in binary format
* @deprecated use {@link #add(byte[], int, int, String, String, XContentType)}
*/
@Deprecated
public BulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
request.add(data, from, length, defaultIndex, defaultType);
return this;
}
/**
* Adds a framed data in binary format
*/

View File

@ -23,17 +23,32 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
/**
* A response of a bulk execution. Holds a response for each item of the bulk request, in order. Each
* item records the index/type/id it operated on and whether it failed, along with the failure
* message when it did.
*/
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse>, StatusToXContentObject {
private static final String ITEMS = "items";
private static final String ERRORS = "errors";
private static final String TOOK = "took";
private static final String INGEST_TOOK = "ingest_took";
public static final long NO_INGEST_TOOK = -1L;
@ -141,4 +156,61 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
out.writeVLong(tookInMillis);
out.writeZLong(ingestTookInMillis);
}
@Override
public RestStatus status() {
return RestStatus.OK;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(TOOK, tookInMillis);
if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) {
builder.field(INGEST_TOOK, ingestTookInMillis);
}
builder.field(ERRORS, hasFailures());
builder.startArray(ITEMS);
for (BulkItemResponse item : this) {
item.toXContent(builder, params);
}
builder.endArray();
builder.endObject();
return builder;
}
public static BulkResponse fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
long took = -1L;
long ingestTook = NO_INGEST_TOOK;
List<BulkItemResponse> items = new ArrayList<>();
String currentFieldName = parser.currentName();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (TOOK.equals(currentFieldName)) {
took = parser.longValue();
} else if (INGEST_TOOK.equals(currentFieldName)) {
ingestTook = parser.longValue();
} else if (ERRORS.equals(currentFieldName) == false) {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (ITEMS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
items.add(BulkItemResponse.fromXContent(parser, items.size()));
}
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else {
throwUnknownToken(token, parser.getTokenLocation());
}
}
return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook);
}
}
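A hedged usage sketch of the new round-trip support; the parser construction is an assumption about the surrounding utilities, and the response body is abbreviated:

// hypothetical: rebuild a BulkResponse from a REST response body
String body = "{\"took\":30,\"errors\":false,\"items\":[]}";
try (XContentParser parser = XContentType.JSON.xContent()
        .createParser(NamedXContentRegistry.EMPTY, body)) {
    BulkResponse parsed = BulkResponse.fromXContent(parser);
    assert parsed.hasFailures() == false;
    assert parsed.getTook().millis() == 30L;
}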

View File

@ -36,7 +36,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
public BulkShardRequest() {
}
BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
super(shardId);
this.items = items;
setRefreshPolicy(refreshPolicy);

View File

@ -36,7 +36,8 @@ public class BulkShardResponse extends ReplicationResponse implements WriteRespo
BulkShardResponse() {
}
BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) {
// NOTE: public for testing only
public BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) {
this.shardId = shardId;
this.responses = responses;
}

View File

@ -104,14 +104,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
public WritePrimaryResult<BulkShardRequest, BulkShardResponse> shardOperationOnPrimary(
BulkShardRequest request, IndexShard primary) throws Exception {
final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();
long[] preVersions = new long[request.items().length];
VersionType[] preVersionTypes = new VersionType[request.items().length];
Translog.Location location = null;
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex);
location = executeBulkItemRequest(metaData, primary, request, location, requestIndex);
}
BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
BulkItemRequest[] items = request.items();
for (int i = 0; i < items.length; i++) {
@ -124,110 +120,73 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
/** Executes bulk item requests and handles request execution exceptions */
private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
BulkShardRequest request,
long[] preVersions, VersionType[] preVersionTypes,
Translog.Location location, int requestIndex) throws Exception {
final DocWriteRequest itemRequest = request.items()[requestIndex].request();
preVersions[requestIndex] = itemRequest.version();
preVersionTypes[requestIndex] = itemRequest.versionType();
DocWriteRequest.OpType opType = itemRequest.opType();
try {
// execute item request
final Engine.Result operationResult;
final DocWriteResponse response;
final BulkItemRequest replicaRequest;
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
final IndexRequest indexRequest = (IndexRequest) itemRequest;
Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
if (indexResult.hasFailure()) {
response = null;
} else {
// update the version on request so it will happen on the replicas
final long version = indexResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.setSeqNo(indexResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
}
operationResult = indexResult;
replicaRequest = request.items()[requestIndex];
break;
case UPDATE:
UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
primary, metaData, request, requestIndex);
operationResult = updateResultHolder.operationResult;
response = updateResultHolder.response;
replicaRequest = updateResultHolder.replicaRequest;
break;
case DELETE:
final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
if (deleteResult.hasFailure()) {
response = null;
} else {
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(deleteResult.getVersion());
deleteRequest.setSeqNo(deleteResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
deleteResult.getVersion(), deleteResult.isFound());
}
operationResult = deleteResult;
replicaRequest = request.items()[requestIndex];
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
if (operationResult == null) { // in case of noop update operation
assert response.getResult() == DocWriteResponse.Result.NOOP
: "only noop update can have null operation";
replicaRequest.setIgnoreOnReplica();
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
} else if (operationResult.hasFailure() == false) {
location = locationToSync(location, operationResult.getTranslogLocation());
BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
replicaRequest.setPrimaryResponse(primaryResponse);
// set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
primaryResponse.getResponse().setShardInfo(new ShardInfo());
} else {
DocWriteRequest docWriteRequest = replicaRequest.request();
Exception failure = operationResult.getFailure();
if (isConflictException(failure)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again due to primary relocation and only processing up to N bulk items when the shard gets closed),
// then just use the response we got from the successful execution
if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
replicaRequest.setIgnoreOnReplica();
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
}
}
assert replicaRequest.getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
} catch (Exception e) {
// rethrow the failure if we are going to retry on the primary and let the parent handle the failure
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
DocWriteRequest docWriteRequest = request.items()[j].request();
docWriteRequest.version(preVersions[j]);
docWriteRequest.versionType(preVersionTypes[j]);
}
}
throw e;
final DocWriteRequest.OpType opType = itemRequest.opType();
final Engine.Result operationResult;
final DocWriteResponse response;
final BulkItemRequest replicaRequest;
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
final IndexRequest indexRequest = (IndexRequest) itemRequest;
Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
response = indexResult.hasFailure() ? null :
new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
operationResult = indexResult;
replicaRequest = request.items()[requestIndex];
break;
case UPDATE:
UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
primary, metaData, request, requestIndex);
operationResult = updateResultHolder.operationResult;
response = updateResultHolder.response;
replicaRequest = updateResultHolder.replicaRequest;
break;
case DELETE:
final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
response = deleteResult.hasFailure() ? null :
new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
deleteResult.getVersion(), deleteResult.isFound());
operationResult = deleteResult;
replicaRequest = request.items()[requestIndex];
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
if (operationResult == null) { // in case of noop update operation
assert response.getResult() == DocWriteResponse.Result.NOOP
: "only noop update can have null operation";
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
} else if (operationResult.hasFailure() == false) {
location = locationToSync(location, operationResult.getTranslogLocation());
BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
replicaRequest.setPrimaryResponse(primaryResponse);
// set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
primaryResponse.getResponse().setShardInfo(new ShardInfo());
} else {
DocWriteRequest docWriteRequest = replicaRequest.request();
Exception failure = operationResult.getFailure();
if (isConflictException(failure)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again due to primary relocation and only processing up to N bulk items when the shard gets closed),
// then just use the response we got from the successful execution
if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
}
}
assert replicaRequest.getPrimaryResponse() != null;
return location;
}
@ -266,7 +225,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final UpdateHelper.Result translate;
// translate update request
try {
translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis);
translate = updateHelper.prepare(updateRequest, primary, threadPool::absoluteTimeInMillis);
} catch (Exception failure) {
// we may fail translating a update to index or delete operation
// we use index result to communicate failure while translating update request
@ -281,25 +240,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, request.index());
updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
if (updateOperationResult.hasFailure() == false) {
// update the version on request so it will happen on the replicas
final long version = updateOperationResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.setSeqNo(updateOperationResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
}
break;
case DELETED:
DeleteRequest deleteRequest = translate.action();
updateOperationResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
if (updateOperationResult.hasFailure() == false) {
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(updateOperationResult.getVersion());
deleteRequest.setSeqNo(updateOperationResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
}
break;
case NOOP:
primary.noopUpdate(updateRequest.type());
@ -348,10 +292,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
break;
}
assert (replicaRequest.request() instanceof IndexRequest
&& ((IndexRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) ||
(replicaRequest.request() instanceof DeleteRequest
&& ((DeleteRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO);
assert updateOperationResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO;
// successful operation
break; // out of retry loop
} else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) {
@ -367,20 +308,20 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
Translog.Location location = null;
for (int i = 0; i < request.items().length; i++) {
BulkItemRequest item = request.items()[i];
if (item.isIgnoreOnReplica() == false) {
assert item.getPrimaryResponse() != null : "expected primary response to be set for item [" + i + "] request ["+ item.request() +"]";
if (item.getPrimaryResponse().isFailed() == false &&
item.getPrimaryResponse().getResponse().getResult() != DocWriteResponse.Result.NOOP) {
DocWriteRequest docWriteRequest = item.request();
// ensure request version is updated for replica operation during request execution in the primary
assert docWriteRequest.versionType() == docWriteRequest.versionType().versionTypeForReplicationAndRecovery()
: "unexpected version in replica " + docWriteRequest.version();
DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
final Engine.Result operationResult;
try {
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult = executeIndexRequestOnReplica((IndexRequest) docWriteRequest, replica);
operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica);
break;
case DELETE:
operationResult = executeDeleteRequestOnReplica((DeleteRequest) docWriteRequest, replica);
operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
@ -426,17 +367,21 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
* Execute the given {@link IndexRequest} on a replica shard, throwing a
* {@link RetryOnReplicaException} if the operation needs to be re-tried.
*/
public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) throws IOException {
public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException {
final ShardId shardId = replica.shardId();
SourceToParse sourceToParse =
SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source(),
request.getContentType()).routing(request.routing()).parent(request.parent());
final Engine.Index operation;
final long version = primaryResponse.getVersion();
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
assert versionType.validateVersionForWrites(version);
final long seqNo = primaryResponse.getSeqNo();
try {
operation = replica.prepareIndexOnReplica(sourceToParse, request.getSeqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry());
} catch (MapperParsingException e) {
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
return new Engine.IndexResult(e, version, seqNo);
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
@ -446,7 +391,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
}
/** Utility method to prepare an index operation on primary shards */
static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
SourceToParse sourceToParse =
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(),
request.getContentType()).routing(request.routing()).parent(request.parent());
@ -460,7 +405,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
return new Engine.IndexResult(e, request.version());
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = primary.shardId();
@ -471,12 +416,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
} catch (IllegalArgumentException e) {
// throws IAE on conflicts merging dynamic mappings
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
return new Engine.IndexResult(e, request.version());
}
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
return new Engine.IndexResult(e, request.version());
}
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
@ -487,14 +432,17 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
return primary.index(operation);
}
public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType());
return primary.delete(delete);
}
public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) throws IOException {
private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request, IndexShard replica) throws IOException {
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
final long version = primaryResponse.getVersion();
assert versionType.validateVersionForWrites(version);
final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
request.getSeqNo(), request.primaryTerm(), request.version(), request.versionType());
primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
return replica.delete(delete);
}
}
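The common thread of the changes above, restated as a sketch: replicas no longer read version and seq-no off the request, they derive both from the response the primary already produced (variable names as in the helpers above):

// sketch of the invariant now shared by both replica-side helpers
final long version = primaryResponse.getVersion();              // authoritative version from the primary
final long seqNo = primaryResponse.getSeqNo();                  // authoritative sequence number
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
assert versionType.validateVersionForWrites(version);           // must be safe for replication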

View File

@ -74,7 +74,7 @@ public class DeleteResponse extends DocWriteResponse {
public static DeleteResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
DeleteResponseBuilder context = new DeleteResponseBuilder();
Builder context = new Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
parseXContentFields(parser, context);
}
@ -84,7 +84,7 @@ public class DeleteResponse extends DocWriteResponse {
/**
* Parse the current token and update the parsing context appropriately.
*/
public static void parseXContentFields(XContentParser parser, DeleteResponseBuilder context) throws IOException {
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
@ -97,7 +97,12 @@ public class DeleteResponse extends DocWriteResponse {
}
}
public static class DeleteResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder {
/**
* Builder class for {@link DeleteResponse}. This builder is usually used during xcontent parsing to
* temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
* instantiate the {@link DeleteResponse}.
*/
public static class Builder extends DocWriteResponse.Builder {
private boolean found = false;
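A hedged sketch of the parsing path the Builder enables; the response body is abbreviated and illustrative (a real one also carries result and _shards):

// illustrative: the Builder collects fields token by token, build() assembles the DeleteResponse
try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY,
        "{\"_index\":\"idx\",\"_type\":\"doc\",\"_id\":\"1\",\"_version\":2,\"found\":true}")) {
    DeleteResponse response = DeleteResponse.fromXContent(parser);
    assert "1".equals(response.getId());
}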

View File

@ -121,7 +121,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
/**
* Constructs a new index request against the specific index and type. The
* {@link #source(byte[])} must be set.
* {@link #source(byte[], XContentType)} must be set.
*/
public IndexRequest(String index, String type) {
this.index = index;
@ -316,16 +316,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}
/**
* Sets the document source to index.
*
* @deprecated use {@link #source(String, XContentType)}
*/
@Deprecated
public IndexRequest source(String source) {
return source(new BytesArray(source), XContentFactory.xContentType(source));
}
/**
* Sets the document source to index.
*
@ -383,16 +373,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
}
/**
* Sets the document to index in bytes form.
* @deprecated use {@link #source(BytesReference, XContentType)}
*/
@Deprecated
public IndexRequest source(BytesReference source) {
return source(source, XContentFactory.xContentType(source));
}
/**
* Sets the document to index in bytes form.
*/
@ -402,15 +382,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
return this;
}
/**
* Sets the document to index in bytes form.
* @deprecated use {@link #source(byte[], XContentType)}
*/
@Deprecated
public IndexRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* Sets the document to index in bytes form.
*/
@ -418,20 +389,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
return source(source, 0, source.length, xContentType);
}
/**
* Sets the document to index in bytes form (assumed to be safe to be used from different
* threads).
*
* @param source The source to index
* @param offset The offset in the byte array
* @param length The length of the data
* @deprecated use {@link #source(byte[], int, int, XContentType)}
*/
@Deprecated
public IndexRequest source(byte[] source, int offset, int length) {
return source(new BytesArray(source, offset, length), XContentFactory.xContentType(source));
}
/**
* Sets the document to index in bytes form (assumed to be safe to be used from different
* threads).

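All of the removed overloads relied on content-type auto-detection; the surviving ones require it up front. A minimal sketch (index and type names illustrative):

IndexRequest request = new IndexRequest("idx", "doc", "1");
request.source("{\"field\":\"value\"}", XContentType.JSON);   // content type is now mandatory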
View File

@ -80,16 +80,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Sets the source.
* @deprecated use {@link #setSource(BytesReference, XContentType)}
*/
@Deprecated
public IndexRequestBuilder setSource(BytesReference source) {
request.source(source);
return this;
}
/**
* Sets the source.
*/
@ -118,19 +108,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Sets the document source to index.
* <p>
* Note, its preferable to either set it using {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)}
* or using the {@link #setSource(byte[], XContentType)}.
* @deprecated use {@link #setSource(String, XContentType)}
*/
@Deprecated
public IndexRequestBuilder setSource(String source) {
request.source(source);
return this;
}
/**
* Sets the document source to index.
* <p>
@ -150,16 +127,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Sets the document to index in bytes form.
* @deprecated use {@link #setSource(byte[], XContentType)}
*/
@Deprecated
public IndexRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Sets the document to index in bytes form.
*/
@ -168,21 +135,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Sets the document to index in bytes form (assumed to be safe to be used from different
* threads).
*
* @param source The source to index
* @param offset The offset in the byte array
* @param length The length of the data
* @deprecated use {@link #setSource(byte[], int, int, XContentType)}
*/
@Deprecated
public IndexRequestBuilder setSource(byte[] source, int offset, int length) {
request.source(source, offset, length);
return this;
}
/**
* Sets the document to index in bytes form (assumed to be safe to be used from different
* threads).

View File

@ -76,7 +76,7 @@ public class IndexResponse extends DocWriteResponse {
public static IndexResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
IndexResponseBuilder context = new IndexResponseBuilder();
Builder context = new Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
parseXContentFields(parser, context);
}
@ -86,7 +86,7 @@ public class IndexResponse extends DocWriteResponse {
/**
* Parse the current token and update the parsing context appropriately.
*/
public static void parseXContentFields(XContentParser parser, IndexResponseBuilder context) throws IOException {
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
@ -99,7 +99,12 @@ public class IndexResponse extends DocWriteResponse {
}
}
public static class IndexResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder {
/**
* Builder class for {@link IndexResponse}. This builder is usually used during xcontent parsing to
* temporarily store the parsed values, then the {@link Builder#build()} method is called to
* instantiate the {@link IndexResponse}.
*/
public static class Builder extends DocWriteResponse.Builder {
private boolean created = false;
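The split into fromXContent plus a public, token-level parseXContentFields presumably lets an enclosing parser (such as a bulk item parser) drive the same Builder; a hedged sketch of that shape, assuming a parser already positioned inside the response object:

// illustrative: an external parser feeding tokens into the shared Builder
IndexResponse.Builder builder = new IndexResponse.Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
    IndexResponse.parseXContentFields(parser, builder);   // updates the builder per token
}
IndexResponse response = builder.build();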

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
public class MainResponse extends ActionResponse implements ToXContentObject {
@ -137,4 +138,26 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
public static MainResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MainResponse other = (MainResponse) o;
return Objects.equals(nodeName, other.nodeName) &&
Objects.equals(version, other.version) &&
Objects.equals(clusterUuid, other.clusterUuid) &&
Objects.equals(build, other.build) &&
Objects.equals(available, other.available) &&
Objects.equals(clusterName, other.clusterName);
}
@Override
public int hashCode() {
return Objects.hash(nodeName, version, clusterUuid, build, clusterName, available);
}
}
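equals and hashCode plausibly exist to support xcontent round-trip testing; a hedged sketch, where createParser stands in for an assumed test helper:

// hypothetical round-trip check enabled by the new equals()
XContentBuilder builder = XContentFactory.jsonBuilder();
original.toXContent(builder, ToXContent.EMPTY_PARAMS);
try (XContentParser parser = createParser(builder)) {
    assertEquals(original, MainResponse.fromXContent(parser));
}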

View File

@ -42,7 +42,6 @@ import org.elasticsearch.transport.Transport;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
@ -61,7 +60,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
**/
private final Function<String, Transport.Connection> nodeIdToConnection;
private final SearchTask task;
private final AtomicArray<Result> results;
private final SearchPhaseResults<Result> results;
private final long clusterStateVersion;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
@ -76,7 +75,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
long clusterStateVersion, SearchTask task, SearchPhaseResults<Result> resultConsumer) {
super(name, request, shardsIts, logger);
this.startTime = startTime;
this.logger = logger;
@ -87,9 +86,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
this.listener = listener;
this.nodeIdToConnection = nodeIdToConnection;
this.clusterStateVersion = clusterStateVersion;
results = new AtomicArray<>(shardsIts.size());
this.concreteIndexBoosts = concreteIndexBoosts;
this.aliasFilter = aliasFilter;
this.results = resultConsumer;
}
/**
@ -105,7 +104,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
* This is the main entry point for a search. This method starts the search execution of the initial phase.
*/
public final void start() {
if (results.length() == 0) {
if (getNumShards() == 0) {
//no search shards to search on, bail with empty response
//(it happens with search across _all with no indices around and consistent with broadcast operations)
listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
@ -130,8 +129,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
onPhaseFailure(currentPhase, "all shards failed", null);
} else {
if (logger.isTraceEnabled()) {
final String resultsFrom = results.asList().stream()
.map(r -> r.value.shardTarget().toString()).collect(Collectors.joining(","));
final String resultsFrom = results.getSuccessfulResults()
.map(r -> r.shardTarget().toString()).collect(Collectors.joining(","));
logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion);
}
@ -178,7 +177,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
synchronized (shardFailuresMutex) {
shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it?
if (shardFailures == null) { // still null so we are the first and create a new instance
shardFailures = new AtomicArray<>(results.length());
shardFailures = new AtomicArray<>(getNumShards());
this.shardFailures.set(shardFailures);
}
}
@ -194,7 +193,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
}
}
if (results.get(shardIndex) != null) {
if (results.hasResult(shardIndex)) {
assert failure == null : "shard failed before but shouldn't: " + failure;
successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
}
@ -207,22 +206,22 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
* @param exception the exception explaining or causing the phase failure
*/
private void raisePhaseFailure(SearchPhaseExecutionException exception) {
for (AtomicArray.Entry<Result> entry : results.asList()) {
results.getSuccessfulResults().forEach((entry) -> {
try {
Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId());
sendReleaseSearchContext(entry.value.id(), connection);
Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId());
sendReleaseSearchContext(entry.id(), connection);
} catch (Exception inner) {
inner.addSuppressed(exception);
logger.trace("failed to release context", inner);
}
}
});
listener.onFailure(exception);
}
@Override
public final void onShardSuccess(int shardIndex, Result result) {
successfulOps.incrementAndGet();
results.set(shardIndex, result);
results.consumeResult(shardIndex, result);
if (logger.isTraceEnabled()) {
logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
}
@ -242,7 +241,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
@Override
public final int getNumShards() {
return results.length();
return results.getNumShards();
}
@Override
@ -262,7 +261,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
@Override
public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
return new SearchResponse(internalSearchResponse, scrollId, results.length(), successfulOps.get(),
return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
buildTookInMillis(), buildShardFailures());
}
@ -310,6 +309,5 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
* executed shard request
* @param context the search context for the next phase
*/
protected abstract SearchPhase getNextPhase(AtomicArray<Result> results, SearchPhaseContext context);
protected abstract SearchPhase getNextPhase(SearchPhaseResults<Result> results, SearchPhaseContext context);
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.action.search;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@ -30,17 +29,13 @@ import org.elasticsearch.search.SearchShardTarget;
* where the given index is used to set the result on the array.
*/
final class CountedCollector<R extends SearchPhaseResult> {
private final AtomicArray<R> resultArray;
private final ResultConsumer<R> resultConsumer;
private final CountDown counter;
private final Runnable onFinish;
private final SearchPhaseContext context;
CountedCollector(AtomicArray<R> resultArray, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
if (expectedOps > resultArray.length()) {
throw new IllegalStateException("unexpected number of operations. got: " + expectedOps + " but array size is: "
+ resultArray.length());
}
this.resultArray = resultArray;
CountedCollector(ResultConsumer<R> resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
this.resultConsumer = resultConsumer;
this.counter = new CountDown(expectedOps);
this.onFinish = onFinish;
this.context = context;
@ -63,7 +58,7 @@ final class CountedCollector<R extends SearchPhaseResult> {
void onResult(int index, R result, SearchShardTarget target) {
try {
result.shardTarget(target);
resultArray.set(index, result);
resultConsumer.consume(index, result);
} finally {
countDown();
}
@ -80,4 +75,12 @@ final class CountedCollector<R extends SearchPhaseResult> {
countDown();
}
}
/**
* A functional interface to plug in shard result consumers to this collector
*/
@FunctionalInterface
public interface ResultConsumer<R extends SearchPhaseResult> {
void consume(int shardIndex, R result);
}
}
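With the fixed array replaced by ResultConsumer, any setter-shaped method reference plugs in; the two call sites in this commit illustrate both ends of the spectrum:

// on-the-fly reduction: results stream into a SearchPhaseResults consumer
new CountedCollector<>(queryResult::consumeResult, expectedOps, onFinish, context);
// plain buffering: an AtomicArray setter works just as well
new CountedCollector<>(fetchResults::set, expectedOps, onFinish, context);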

View File

@ -40,18 +40,19 @@ import java.util.function.Function;
* @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
*/
final class DfsQueryPhase extends SearchPhase {
private final AtomicArray<QuerySearchResultProvider> queryResult;
private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> queryResult;
private final SearchPhaseController searchPhaseController;
private final AtomicArray<DfsSearchResult> dfsSearchResults;
private final Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
private final Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
private final SearchPhaseContext context;
private final SearchTransportService searchTransportService;
DfsQueryPhase(AtomicArray<DfsSearchResult> dfsSearchResults,
SearchPhaseController searchPhaseController,
Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory, SearchPhaseContext context) {
Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory,
SearchPhaseContext context) {
super("dfs_query");
this.queryResult = new AtomicArray<>(dfsSearchResults.length());
this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards());
this.searchPhaseController = searchPhaseController;
this.dfsSearchResults = dfsSearchResults;
this.nextPhaseFactory = nextPhaseFactory;
@ -64,7 +65,8 @@ final class DfsQueryPhase extends SearchPhase {
// TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs
// to free up memory early
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults);
final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult, dfsSearchResults.asList().size(),
final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult::consumeResult,
dfsSearchResults.asList().size(),
() -> {
context.executeNextPhase(this, nextPhaseFactory.apply(queryResult));
}, context);

View File

@ -49,29 +49,31 @@ final class FetchSearchPhase extends SearchPhase {
private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
private final SearchPhaseContext context;
private final Logger logger;
private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer;
FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
SearchPhaseController searchPhaseController,
SearchPhaseContext context) {
this(queryResults, searchPhaseController, context,
this(resultConsumer, searchPhaseController, context,
(response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits
(finalResponse) -> sendResponsePhase(finalResponse, context)));
}
FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
SearchPhaseController searchPhaseController,
SearchPhaseContext context, Function<SearchResponse, SearchPhase> nextPhaseFactory) {
super("fetch");
if (context.getNumShards() != queryResults.length()) {
if (context.getNumShards() != resultConsumer.getNumShards()) {
throw new IllegalStateException("number of shards must match the length of the query results but doesn't:"
+ context.getNumShards() + "!=" + queryResults.length());
+ context.getNumShards() + "!=" + resultConsumer.getNumShards());
}
this.fetchResults = new AtomicArray<>(queryResults.length());
this.fetchResults = new AtomicArray<>(resultConsumer.getNumShards());
this.searchPhaseController = searchPhaseController;
this.queryResults = queryResults;
this.queryResults = resultConsumer.results;
this.nextPhaseFactory = nextPhaseFactory;
this.context = context;
this.logger = context.getLogger();
this.resultConsumer = resultConsumer;
}
@ -99,7 +101,7 @@ final class FetchSearchPhase extends SearchPhase {
ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults);
String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null;
List<AtomicArray.Entry<QuerySearchResultProvider>> queryResultsAsList = queryResults.asList();
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResultsAsList);
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce();
final boolean queryAndFetchOptimization = queryResults.length() == 1;
final Runnable finishPhase = ()
-> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
@ -119,7 +121,7 @@ final class FetchSearchPhase extends SearchPhase {
final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards)
: null;
final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults,
final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults::set,
docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
finishPhase, context);
for (int i = 0; i < docIdsToLoad.length; i++) {

View File

@ -28,12 +28,14 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.transport.ConnectTransportException;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
/**
* This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator}
@ -213,4 +215,53 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
* @param listener the listener to notify on response
*/
protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener<FirstResult> listener);
/**
* This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing
*/
static class SearchPhaseResults<Result extends SearchPhaseResult> {
final AtomicArray<Result> results;
SearchPhaseResults(int size) {
results = new AtomicArray<>(size);
}
/**
* Returns the number of expected results this class should collect
*/
final int getNumShards() {
return results.length();
}
/**
* A stream of all non-null (successful) shard results
*/
final Stream<Result> getSuccessfulResults() {
return results.asList().stream().map(e -> e.value);
}
/**
* Consumes a single shard result
* @param shardIndex the shard's index, a 0-based id used to establish a 1:1 mapping to the searched shards
* @param result the shard's result
*/
void consumeResult(int shardIndex, Result result) {
assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set";
results.set(shardIndex, result);
}
/**
* Returns <code>true</code> iff a result is present for the given shard ID.
*/
final boolean hasResult(int shardIndex) {
return results.get(shardIndex) != null;
}
/**
* Reduces the collected results
*/
SearchPhaseController.ReducedQueryPhase reduce() {
throw new UnsupportedOperationException("reduce is not supported");
}
}
}
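A hedged sketch of the extension point this opens up, an illustrative subclass (not one from this commit) that hooks result consumption:

// illustrative: subclass that observes every shard result as it arrives
static final class CountingResults<R extends SearchPhaseResult> extends SearchPhaseResults<R> {
    private final AtomicInteger consumed = new AtomicInteger();
    CountingResults(int size) {
        super(size);
    }
    @Override
    void consumeResult(int shardIndex, R result) {
        super.consumeResult(shardIndex, result);   // keep the backing array in sync
        consumed.incrementAndGet();                // on-the-fly processing hook
    }
}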

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
@ -43,7 +42,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}
@ -54,8 +53,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
}
@Override
protected SearchPhase getNextPhase(AtomicArray<DfsSearchResult> results, SearchPhaseContext context) {
return new DfsQueryPhase(results, searchPhaseController,
protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) {
return new DfsQueryPhase(results.results, searchPhaseController,
(queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context);
}
}

View File

@ -114,4 +114,5 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
* a response is returned to the user indicating that all shards have failed.
*/
void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase);
}

View File

@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
@ -70,14 +71,6 @@ import java.util.stream.StreamSupport;
public class SearchPhaseController extends AbstractComponent {
private static final Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = (o1, o2) -> {
int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex());
if (i == 0) {
i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id();
}
return i;
};
private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
private final BigArrays bigArrays;
@ -149,6 +142,9 @@ public class SearchPhaseController extends AbstractComponent {
* named completion suggestion across all shards. If more than one named completion suggestion is specified in the
* request, the suggest docs for a named suggestion are ordered by the suggestion name.
*
* Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate
* the result. In order to obtain stable results, the shard index (the index of the result in the result array) must be the same.
*
* @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
* Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
* @param resultsArr Shard result holder
@ -159,26 +155,31 @@ public class SearchPhaseController extends AbstractComponent {
return EMPTY_DOCS;
}
final QuerySearchResult result;
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
boolean hasResult = false;
QuerySearchResult resultToOptimize = null;
// let's see if we only got hits from a single shard; if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().hasHits()) {
if (result != null) { // we already have one, can't really optimize
if (hasResult) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
hasResult = true;
resultToOptimize = entry.value.queryResult();
shardIndex = entry.index;
}
}
result = canOptimize ? resultToOptimize : results.get(0).value.queryResult();
assert result != null;
}
if (canOptimize) {
int offset = result.from();
@ -224,74 +225,62 @@ public class SearchPhaseController extends AbstractComponent {
return docs;
}
@SuppressWarnings("unchecked")
AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]);
Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
QuerySearchResultProvider firstResult = sortedResults[0].value;
int topN = firstResult.queryResult().size();
int from = firstResult.queryResult().from();
if (ignoreFrom) {
from = 0;
}
final int topN = result.queryResult().size();
final int from = ignoreFrom ? 0 : result.queryResult().from();
final TopDocs mergedTopDocs;
int numShards = resultsArr.length();
if (firstResult.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) firstResult.queryResult().topDocs();
final int numShards = resultsArr.length();
if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);
final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN);
}
}
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
} else if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
} else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);
final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
}
}
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
} else {
final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = topDocs;
}
// TopDocs#merge can't deal with null shard TopDocs
for (int i = 0; i < shardTopDocs.length; ++i) {
if (shardTopDocs[i] == null) {
shardTopDocs[i] = Lucene.EMPTY_TOP_DOCS;
}
}
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
}
ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
// group suggestions and assign shard index
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
Suggest shardSuggest = sortedResult.value.queryResult().suggest();
if (shardSuggest != null) {
for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
@ -461,23 +450,54 @@ public class SearchPhaseController extends AbstractComponent {
/**
* Reduces the given query results and consumes all aggregations and profile results.
* @param queryResults a list of non-null query shard results
*/
public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
return reducedQueryPhase(queryResults, null, 0);
}
/**
* Reduces the given query results and consumes all aggregations and profile results.
* @param queryResults a list of non-null query shard results
* @param bufferedAggs a list of pre-collected / buffered aggregations. If this list is non-null, all aggregations have been consumed
* from all non-null query results.
* @param numReducePhases the number of non-final reduce phases applied to the query results.
* @see QuerySearchResult#consumeAggs()
* @see QuerySearchResult#consumeProfileResult()
*/
public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
private ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults,
List<InternalAggregations> bufferedAggs, int numReducePhases) {
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
numReducePhases++; // increment for this phase
long totalHits = 0;
long fetchHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
boolean timedOut = false;
Boolean terminatedEarly = null;
if (queryResults.isEmpty()) {
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null);
if (queryResults.isEmpty()) { // early terminate, we have nothing to reduce
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null,
numReducePhases);
}
QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
final QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
final boolean hasSuggest = firstResult.suggest() != null;
final boolean hasAggs = firstResult.hasAggs();
final boolean hasProfileResults = firstResult.hasProfileResults();
final List<InternalAggregations> aggregationsList = hasAggs ? new ArrayList<>(queryResults.size()) : Collections.emptyList();
final boolean consumeAggs;
final List<InternalAggregations> aggregationsList;
if (bufferedAggs != null) {
consumeAggs = false;
// we already have results from intermediate reduces and just need to perform the final reduce
assert firstResult.hasAggs() : "firstResult has no aggs but we got non-null buffered aggs?";
aggregationsList = bufferedAggs;
} else if (firstResult.hasAggs()) {
// the number of shards was less than the buffer size so we reduce agg results directly
aggregationsList = new ArrayList<>(queryResults.size());
consumeAggs = true;
} else {
// no aggregations
aggregationsList = Collections.emptyList();
consumeAggs = false;
}
// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size())
@ -506,7 +526,7 @@ public class SearchPhaseController extends AbstractComponent {
suggestionList.add(suggestion);
}
}
if (hasAggs) {
if (consumeAggs) {
aggregationsList.add((InternalAggregations) result.consumeAggs());
}
if (hasProfileResults) {
@ -515,16 +535,27 @@ public class SearchPhaseController extends AbstractComponent {
}
}
final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true);
final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
firstResult.pipelineAggregators());
firstResult.pipelineAggregators(), reduceContext);
final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, firstResult, suggest, aggregations,
shardResults);
shardResults, numReducePhases);
}
/**
* Performs an intermediate reduce phase on the aggregations. In contrast to the final reduce, this phase never prunes
* information that is relevant for the final reduce step. For the final reduce see {@link #reduceAggs(List, List, ReduceContext)}
*/
private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) {
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false);
return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
null, reduceContext);
}
private InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList,
List<SiblingPipelineAggregator> pipelineAggregators) {
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService);
List<SiblingPipelineAggregator> pipelineAggregators, ReduceContext reduceContext) {
InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
if (pipelineAggregators != null) {
List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false)
@ -558,10 +589,15 @@ public class SearchPhaseController extends AbstractComponent {
final InternalAggregations aggregations;
// the reduced profile results
final SearchProfileShardResults shardResults;
// the number of reduce phases
final int numReducePhases;
ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly,
QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations,
SearchProfileShardResults shardResults) {
SearchProfileShardResults shardResults, int numReducePhases) {
if (numReducePhases <= 0) {
throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases);
}
this.totalHits = totalHits;
this.fetchHits = fetchHits;
if (Float.isInfinite(maxScore)) {
@ -575,6 +611,7 @@ public class SearchPhaseController extends AbstractComponent {
this.suggest = suggest;
this.aggregations = aggregations;
this.shardResults = shardResults;
this.numReducePhases = numReducePhases;
}
/**
@ -582,7 +619,7 @@ public class SearchPhaseController extends AbstractComponent {
* @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray)
*/
public InternalSearchResponse buildResponse(SearchHits hits) {
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases);
}
/**
@ -593,4 +630,95 @@ public class SearchPhaseController extends AbstractComponent {
}
}
/**
* A {@link org.elasticsearch.action.search.InitialSearchPhase.SearchPhaseResults} implementation
* that incrementally reduces aggregation results as shard results are consumed.
* This implementation can be configured to batch up a certain number of results and only reduce them
* once the buffer is exhausted.
*/
static final class QueryPhaseResultConsumer
extends InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> {
private final InternalAggregations[] buffer;
private int index;
private final SearchPhaseController controller;
private int numReducePhases = 0;
/**
* Creates a new {@link QueryPhaseResultConsumer}
* @param controller a controller instance to reduce the query response objects
* @param expectedResultSize the expected number of query results. Corresponds to the number of shards queried
* @param bufferSize the size of the reduce buffer. If the buffer size is smaller than the number of expected results,
* the buffer is used to incrementally reduce aggregation results before all shards have responded.
*/
private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize) {
super(expectedResultSize);
if (expectedResultSize != 1 && bufferSize < 2) {
throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result");
}
if (expectedResultSize <= bufferSize) {
throw new IllegalArgumentException("buffer size must be less than the expected result size");
}
this.controller = controller;
// no need to buffer anything if we have fewer expected results than the buffer size; in that case we don't consume any results ahead of time.
this.buffer = new InternalAggregations[bufferSize];
}
@Override
public void consumeResult(int shardIndex, QuerySearchResultProvider result) {
super.consumeResult(shardIndex, result);
QuerySearchResult queryResult = result.queryResult();
assert queryResult.hasAggs() : "this collector should only be used if aggs are requested";
consumeInternal(queryResult);
}
private synchronized void consumeInternal(QuerySearchResult querySearchResult) {
InternalAggregations aggregations = (InternalAggregations) querySearchResult.consumeAggs();
if (index == buffer.length) {
InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(buffer));
Arrays.fill(buffer, null);
numReducePhases++;
buffer[0] = reducedAggs;
index = 1;
}
final int i = index++;
buffer[i] = aggregations;
}
private synchronized List<InternalAggregations> getRemaining() {
return Arrays.asList(buffer).subList(0, index);
}
@Override
public ReducedQueryPhase reduce() {
return controller.reducedQueryPhase(results.asList(), getRemaining(), numReducePhases);
}
/**
* Returns the number of buffered results
*/
int getNumBuffered() {
return index;
}
int getNumReducePhases() { return numReducePhases; }
}
/**
* Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
*/
InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> newSearchPhaseResults(SearchRequest request, int numShards) {
SearchSourceBuilder source = request.source();
if (source != null && source.aggregations() != null) {
if (request.getBatchedReduceSize() < numShards) {
// only use this if there are aggs and if there are more shards than we should reduce at once
return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize());
}
}
return new InitialSearchPhase.SearchPhaseResults(numShards) {
@Override
public ReducedQueryPhase reduce() {
return reducedQueryPhase(results.asList());
}
};
}
}
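
A note on the roll-up in consumeInternal above: once the buffer fills, everything buffered so far is folded into slot 0 and consumption continues, so memory stays bounded by the buffer size regardless of shard count. A minimal, self-contained sketch of the same pattern (not the Elasticsearch implementation; plain longs stand in for InternalAggregations and a sum stands in for the aggregation reduce):

import java.util.Arrays;

final class BatchedReducer {
    private final long[] buffer;
    private int index;
    private int numReducePhases;

    BatchedReducer(int bufferSize) {
        if (bufferSize < 2) {
            throw new IllegalArgumentException("buffer size must be >= 2");
        }
        this.buffer = new long[bufferSize];
    }

    synchronized void consume(long shardResult) {
        if (index == buffer.length) {
            // buffer exhausted: fold all buffered results into slot 0 and keep consuming
            long reduced = Arrays.stream(buffer).sum(); // stands in for reduceAggsIncrementally
            Arrays.fill(buffer, 0L);
            numReducePhases++;
            buffer[0] = reduced;
            index = 1;
        }
        buffer[index++] = shardResult;
    }

    synchronized long reduce() {
        numReducePhases++; // the final reduce counts as a phase too
        return Arrays.stream(buffer, 0, index).sum();
    }

    int getNumReducePhases() {
        return numReducePhases;
    }
}

With bufferSize = 5 and 20 consumed results, the buffer overflows on the 6th, 10th, 14th, and 18th call, so four intermediate reduces run before the final one and getNumReducePhases() returns 5.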

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;
@ -44,17 +43,19 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<Qu
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
request, listener, shardsIts, startTime, clusterStateVersion, task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}
protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
getSearchTransport().sendExecuteQuery(getConnection(shard.currentNodeId()),
buildShardSearchRequest(shardIt, shard), getTask(), listener);
}
@Override
protected SearchPhase getNextPhase(AtomicArray<QuerySearchResultProvider> results, SearchPhaseContext context) {
protected SearchPhase getNextPhase(SearchPhaseResults<QuerySearchResultProvider> results, SearchPhaseContext context) {
return new FetchSearchPhase(results, searchPhaseController, context);
}
}

View File

@ -70,6 +70,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
private Scroll scroll;
private int batchedReduceSize = 512;
private String[] types = Strings.EMPTY_ARRAY;
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed();
@ -274,6 +276,25 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
return this.requestCache;
}
/**
* Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection
* mechanism to reduce the memory overhead per search request when the number of shards in the request is potentially large.
*/
public void setBatchedReduceSize(int batchedReduceSize) {
if (batchedReduceSize <= 1) {
throw new IllegalArgumentException("batchedReduceSize must be >= 2");
}
this.batchedReduceSize = batchedReduceSize;
}
/**
* Returns the number of shard results that should be reduced at once on the coordinating node. This value should be used as a
* protection mechanism to reduce the memory overhead per search request when the number of shards in the request is potentially large.
*/
public int getBatchedReduceSize() {
return batchedReduceSize;
}
/**
* @return true if the request only has suggest
*/
@ -320,6 +341,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
requestCache = in.readOptionalBoolean();
batchedReduceSize = in.readVInt();
}
@Override
@ -337,6 +359,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
out.writeOptionalBoolean(requestCache);
out.writeVInt(batchedReduceSize);
}
@Override

View File

@ -523,4 +523,13 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
return request.source();
}
/**
* Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection
* mechanism to reduce the memory overhead per search request when the number of shards in the request is potentially large.
*/
public SearchRequestBuilder setBatchedReduceSize(int batchedReduceSize) {
this.request.setBatchedReduceSize(batchedReduceSize);
return this;
}
}
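
A hedged usage sketch of the new knob (the client variable, index name, and field are placeholders): values below 2 are rejected by SearchRequest#setBatchedReduceSize, and per newSearchPhaseResults the setting only takes effect when the request carries aggregations and targets more shards than the buffer holds.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;

static SearchResponse searchWithBatchedReduce(Client client) {
    return client.prepareSearch("logs-*")
            .setBatchedReduceSize(64) // reduce at most 64 shard results at a time on the coordinating node
            .addAggregation(AggregationBuilders.terms("hosts").field("host"))
            .get();
}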

View File

@ -61,7 +61,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
public SearchResponse() {
}
public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, long tookInMillis, ShardSearchFailure[] shardFailures) {
public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards,
long tookInMillis, ShardSearchFailure[] shardFailures) {
this.internalResponse = internalResponse;
this.scrollId = scrollId;
this.totalShards = totalShards;
@ -106,6 +107,13 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
return internalResponse.terminatedEarly();
}
/**
* Returns the number of reduce phases applied to obtain this search response
*/
public int getNumReducePhases() {
return internalResponse.getNumReducePhases();
}
/**
* How long the search took.
*/
@ -172,13 +180,6 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
return internalResponse.profile();
}
static final class Fields {
static final String _SCROLL_ID = "_scroll_id";
static final String TOOK = "took";
static final String TIMED_OUT = "timed_out";
static final String TERMINATED_EARLY = "terminated_early";
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@ -189,14 +190,18 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
if (scrollId != null) {
builder.field(Fields._SCROLL_ID, scrollId);
builder.field("_scroll_id", scrollId);
}
builder.field(Fields.TOOK, tookInMillis);
builder.field(Fields.TIMED_OUT, isTimedOut());
builder.field("took", tookInMillis);
builder.field("timed_out", isTimedOut());
if (isTerminatedEarly() != null) {
builder.field(Fields.TERMINATED_EARLY, isTerminatedEarly());
builder.field("terminated_early", isTerminatedEarly());
}
RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(), getShardFailures());
if (getNumReducePhases() != 1) {
builder.field("num_reduce_phases", getNumReducePhases());
}
RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(),
getShardFailures());
internalResponse.toXContent(builder, params);
return builder;
}
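
Since num_reduce_phases is only rendered when it differs from the default of 1, a reader of the JSON can treat its absence as "single reduce"; programmatic callers can always ask the response directly. A small sketch (the method name is illustrative):

import org.elasticsearch.action.search.SearchResponse;

static void logReducePhases(SearchResponse response) {
    // the getter always returns the actual count, even when the field is omitted from the JSON
    if (response.getNumReducePhases() > 1) {
        System.out.println("batched reduce ran " + response.getNumReducePhases() + " phases");
    }
}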

View File

@ -19,14 +19,12 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
@ -38,8 +36,6 @@ import java.io.IOException;
public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>> extends ReplicationRequest<R> implements WriteRequest<R> {
private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;
private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
/**
* Constructor for deserialization.
*/
@ -66,32 +62,11 @@ public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
refreshPolicy = RefreshPolicy.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNo = in.readZLong();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
refreshPolicy.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeZLong(seqNo);
}
}
/**
* Returns the sequence number for this operation. The sequence number is assigned while the operation
* is performed on the primary shard.
*/
public long getSeqNo() {
return seqNo;
}
/** Sets the sequence number for this operation. Should only be called on the primary shard. */
public void setSeqNo(long seqNo) {
this.seqNo = seqNo;
}
}

View File

@ -38,7 +38,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
@ -74,7 +73,6 @@ public class ReplicationResponse extends ActionResponse {
public static class ShardInfo implements Streamable, ToXContentObject {
private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
@ -134,25 +132,6 @@ public class ReplicationResponse extends ActionResponse {
return status;
}
@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
ShardInfo other = (ShardInfo) that;
return Objects.equals(total, other.total) &&
Objects.equals(successful, other.successful) &&
Arrays.equals(failures, other.failures);
}
@Override
public int hashCode() {
return Objects.hash(total, successful, failures);
}
@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readVInt();
@ -327,27 +306,6 @@ public class ReplicationResponse extends ActionResponse {
return primary;
}
@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
Failure failure = (Failure) that;
return Objects.equals(primary, failure.primary) &&
Objects.equals(shardId, failure.shardId) &&
Objects.equals(nodeId, failure.nodeId) &&
Objects.equals(cause, failure.cause) &&
Objects.equals(status, failure.status);
}
@Override
public int hashCode() {
return Objects.hash(shardId, nodeId, cause, status, primary);
}
@Override
public void readFrom(StreamInput in) throws IOException {
shardId = ShardId.readShardId(in);

View File

@ -171,7 +171,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
final ShardId shardId = request.getShardId();
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
final IndexShard indexShard = indexService.getShard(shardId.getId());
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::estimatedTimeInMillis);
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis);
switch (result.getResponseResult()) {
case CREATED:
IndexRequest upsertRequest = result.action();

View File

@ -30,8 +30,11 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@ -49,7 +52,7 @@ import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocWriteRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
implements DocWriteRequest<UpdateRequest>, WriteRequest<UpdateRequest>, ToXContentObject {
private String type;
private String id;
@ -553,16 +556,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #doc(String, XContentType)}
*/
@Deprecated
public UpdateRequest doc(String source) {
safeDoc().source(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -571,16 +564,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #doc(byte[], XContentType)}
*/
@Deprecated
public UpdateRequest doc(byte[] source) {
safeDoc().source(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -589,16 +572,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #doc(byte[], int, int, XContentType)}
*/
@Deprecated
public UpdateRequest doc(byte[] source, int offset, int length) {
safeDoc().source(source, offset, length);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -669,16 +642,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #upsert(String, XContentType)}
*/
@Deprecated
public UpdateRequest upsert(String source) {
safeUpsertRequest().source(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
@ -687,16 +650,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #upsert(byte[], XContentType)}
*/
@Deprecated
public UpdateRequest upsert(byte[] source) {
safeUpsertRequest().source(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
@ -705,16 +658,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #upsert(byte[], int, int, XContentType)}
*/
@Deprecated
public UpdateRequest upsert(byte[] source, int offset, int length) {
safeUpsertRequest().source(source, offset, length);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
@ -906,4 +849,42 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
out.writeBoolean(scriptedUpsert);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (docAsUpsert) {
builder.field("doc_as_upsert", docAsUpsert);
}
if (doc != null) {
XContentType xContentType = doc.getContentType();
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, doc.source(), xContentType)) {
builder.field("doc");
builder.copyCurrentStructure(parser);
}
}
if (script != null) {
builder.field("script", script);
}
if (upsertRequest != null) {
XContentType xContentType = upsertRequest.getContentType();
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, upsertRequest.source(), xContentType)) {
builder.field("upsert");
builder.copyCurrentStructure(parser);
}
}
if (scriptedUpsert) {
builder.field("scripted_upsert", scriptedUpsert);
}
if (detectNoop == false) {
builder.field("detect_noop", detectNoop);
}
if (fields != null) {
builder.array("fields", fields);
}
if (fetchSourceContext != null) {
builder.field("_source", fetchSourceContext);
}
builder.endObject();
return builder;
}
}
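
A hedged sketch of the new serialization path (index/type/id and the doc body are placeholders; builder.string() is assumed available on this branch's XContentBuilder):

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

static String updateRequestAsJson() throws Exception {
    UpdateRequest request = new UpdateRequest("index", "type", "1")
            .doc("{\"field\":\"value\"}", XContentType.JSON)
            .docAsUpsert(true);
    try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
        request.toXContent(builder, ToXContent.EMPTY_PARAMS);
        return builder.string(); // e.g. {"doc_as_upsert":true,"doc":{"field":"value"}}
    }
}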

View File

@ -221,16 +221,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #setDoc(String, XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setDoc(String source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -239,16 +229,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #setDoc(byte[], XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setDoc(byte[] source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -257,16 +237,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
* @deprecated use {@link #setDoc(byte[], int, int, XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setDoc(byte[] source, int offset, int length) {
request.doc(source, offset, length);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
@ -326,16 +296,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #setUpsert(String, XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setUpsert(String source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
@ -344,16 +304,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #setUpsert(byte[], XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setUpsert(byte[] source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
@ -362,16 +312,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
* @deprecated use {@link #setUpsert(byte[], int, int, XContentType)}
*/
@Deprecated
public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length) {
request.upsert(source, offset, length);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/

View File

@ -114,7 +114,7 @@ public class UpdateResponse extends DocWriteResponse {
public static UpdateResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
UpdateResponseBuilder context = new UpdateResponseBuilder();
Builder context = new Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
parseXContentFields(parser, context);
}
@ -124,7 +124,7 @@ public class UpdateResponse extends DocWriteResponse {
/**
* Parse the current token and update the parsing context appropriately.
*/
public static void parseXContentFields(XContentParser parser, UpdateResponseBuilder context) throws IOException {
public static void parseXContentFields(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
@ -137,7 +137,12 @@ public class UpdateResponse extends DocWriteResponse {
}
}
public static class UpdateResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder {
/**
* Builder class for {@link UpdateResponse}. This builder is usually used during xcontent parsing to
* temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to
* instantiate the {@link UpdateResponse}.
*/
public static class Builder extends DocWriteResponse.Builder {
private GetResult getResult = null;

View File

@ -100,10 +100,11 @@ public abstract class Terminal {
public final boolean promptYesNo(String prompt, boolean defaultYes) {
String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]";
while (true) {
String answer = readText(prompt + answerPrompt).toLowerCase(Locale.ROOT);
if (answer.isEmpty()) {
String answer = readText(prompt + answerPrompt);
if (answer == null || answer.isEmpty()) {
return defaultYes;
}
answer = answer.toLowerCase(Locale.ROOT);
boolean answerYes = answer.equals("y");
if (answerYes == false && answer.equals("n") == false) {
println("Did not understand answer '" + answer + "'");

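The added null check matters for non-interactive runs where readText hits end-of-input; a hedged sketch of the resulting behavior:

import org.elasticsearch.cli.Terminal;

static boolean confirm() {
    // with stdin closed (readText returns null) or an empty answer, the prompt
    // now falls back to the default instead of throwing a NullPointerException
    return Terminal.DEFAULT.promptYesNo("Continue?", true);
}
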
View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
@ -68,7 +69,7 @@ public class MappingUpdatedAction extends AbstractComponent {
if (type.equals(MapperService.DEFAULT_MAPPING)) {
throw new IllegalArgumentException("_default_ mapping should not be updated");
}
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString())
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString(), XContentType.JSON)
.setMasterNodeTimeout(timeout).setTimeout(timeout);
}

View File

@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@ -712,10 +711,12 @@ public class Strings {
* @return the delimited String
*/
public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix) {
return collectionToDelimitedString(coll, delim, prefix, suffix, new StringBuilder());
StringBuilder sb = new StringBuilder();
collectionToDelimitedString(coll, delim, prefix, suffix, sb);
return sb.toString();
}
public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) {
public static void collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) {
Iterator<?> it = coll.iterator();
while (it.hasNext()) {
sb.append(prefix).append(it.next()).append(suffix);
@ -723,7 +724,6 @@ public class Strings {
sb.append(delim);
}
}
return sb.toString();
}
/**
@ -758,12 +758,14 @@ public class Strings {
* @return the delimited String
*/
public static String arrayToDelimitedString(Object[] arr, String delim) {
return arrayToDelimitedString(arr, delim, new StringBuilder());
StringBuilder sb = new StringBuilder();
arrayToDelimitedString(arr, delim, sb);
return sb.toString();
}
public static String arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) {
public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) {
if (isEmpty(arr)) {
return "";
return;
}
for (int i = 0; i < arr.length; i++) {
if (i > 0) {
@ -771,7 +773,6 @@ public class Strings {
}
sb.append(arr[i]);
}
return sb.toString();
}
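
The refactor lets callers thread a single StringBuilder through several append-style calls instead of allocating intermediate strings; a short sketch with hypothetical data:

import java.util.Arrays;
import org.elasticsearch.common.Strings;

static String describe() {
    StringBuilder sb = new StringBuilder("indices=");
    Strings.collectionToDelimitedString(Arrays.asList("a", "b"), ",", "[", "]", sb);
    sb.append(" nodes=");
    Strings.arrayToDelimitedString(new Object[] {"n1", "n2"}, ",", sb);
    return sb.toString(); // indices=[a],[b] nodes=n1,n2
}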
/**

View File

@ -63,7 +63,7 @@ public class BlobPath implements Iterable<String> {
public String buildAsString() {
String p = String.join(SEPARATOR, paths);
if (p.isEmpty()) {
if (p.isEmpty() || p.endsWith(SEPARATOR)) {
return p;
}
return p + SEPARATOR;

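With the guard in place the contract is simpler: the result is either empty or ends with exactly one separator, even if the last path element already carries one. A hedged sketch (assuming the usual BlobPath.cleanPath() factory):

import org.elasticsearch.common.blobstore.BlobPath;

static void blobPathExamples() {
    String empty = BlobPath.cleanPath().buildAsString();                        // ""
    String nested = BlobPath.cleanPath().add("foo").add("bar").buildAsString(); // "foo/bar/"
}
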
View File

@ -448,12 +448,20 @@ public class XContentHelper {
* {@link XContentType}. Wraps the output into a new anonymous object.
*/
public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean humanReadable) throws IOException {
return toXContent(toXContent, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
}
/**
* Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided
* {@link XContentType} and {@link Params}. Wraps the output into a new anonymous object.
*/
public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException {
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
builder.humanReadable(humanReadable);
if (toXContent.isFragment()) {
builder.startObject();
}
toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
toXContent.toXContent(builder, params);
if (toXContent.isFragment()) {
builder.endObject();
}

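The new overload lets callers thread custom Params through instead of the hard-coded EMPTY_PARAMS; a hedged sketch (the param key is hypothetical):

import java.io.IOException;
import java.util.Collections;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

static BytesReference render(ToXContent obj) throws IOException {
    ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"));
    return XContentHelper.toXContent(obj, XContentType.JSON, params, false /* humanReadable */);
}
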
View File

@ -39,7 +39,6 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
public class MembershipAction extends AbstractComponent {
@ -63,8 +62,7 @@ public class MembershipAction extends AbstractComponent {
private final MembershipListener listener;
public MembershipAction(Settings settings, TransportService transportService,
Supplier<DiscoveryNode> localNodeSupplier, MembershipListener listener) {
public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener) {
super(settings);
this.transportService = transportService;
this.listener = listener;
@ -73,7 +71,7 @@ public class MembershipAction extends AbstractComponent {
transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new,
ThreadPool.Names.GENERIC, new JoinRequestRequestHandler());
transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
() -> new ValidateJoinRequest(localNodeSupplier), ThreadPool.Names.GENERIC,
() -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC,
new ValidateJoinRequestRequestHandler());
transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new,
ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
@ -155,22 +153,18 @@ public class MembershipAction extends AbstractComponent {
}
static class ValidateJoinRequest extends TransportRequest {
private final Supplier<DiscoveryNode> localNode;
private ClusterState state;
ValidateJoinRequest(Supplier<DiscoveryNode> localNode) {
this.localNode = localNode;
}
ValidateJoinRequest() {}
ValidateJoinRequest(ClusterState state) {
this.state = state;
this.localNode = state.nodes()::getLocalNode;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.state = ClusterState.readFrom(in, localNode.get());
this.state = ClusterState.readFrom(in, null);
}
@Override

View File

@ -191,7 +191,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
new NewPendingClusterStateListener(),
discoverySettings,
clusterService.getClusterName());
this.membership = new MembershipAction(settings, transportService, this::localNode, new MembershipListener());
this.membership = new MembershipAction(settings, transportService, new MembershipListener());
this.joinThreadControl = new JoinThreadControl();
transportService.registerRequestHandler(

View File

@ -19,6 +19,7 @@
package org.elasticsearch.http;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.transport.PortsRange;
@ -69,7 +70,14 @@ public final class HttpTransportSettings {
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED =
Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_CONTENT_TYPE_REQUIRED =
Setting.boolSetting("http.content_type.required", false, Property.NodeScope);
new Setting<>("http.content_type.required", (s) -> Boolean.toString(true), (s) -> {
final boolean value = Booleans.parseBoolean(s);
if (value == false) {
throw new IllegalArgumentException("http.content_type.required cannot be set to false. It exists only to make a rolling" +
" upgrade easier");
}
return true;
}, Property.NodeScope, Property.Deprecated);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =

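The same validate-in-the-parser pattern, sketched with a hypothetical key (not an Elasticsearch setting): the setting still exists so old configs keep parsing, but any attempt to set it to false fails fast.

import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

final class ExampleSettings {
    // rejects every value except "true"; Property.Deprecated warns when the key is used
    static final Setting<Boolean> EXAMPLE_ONLY_TRUE = new Setting<>(
            "example.flag",
            s -> Boolean.toString(true),
            s -> {
                if (Booleans.parseBoolean(s) == false) {
                    throw new IllegalArgumentException("example.flag cannot be set to false");
                }
                return true;
            },
            Property.NodeScope, Property.Deprecated);
}
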
View File

@ -391,6 +391,14 @@ public abstract class Engine implements Closeable {
this.created = created;
}
/**
* Used when an index operation fails before it reaches the internal engine
* (e.g. while preparing the operation or updating mappings).
*/
public IndexResult(Exception failure, long version) {
this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
public IndexResult(Exception failure, long version, long seqNo) {
super(Operation.TYPE.INDEX, failure, version, seqNo);
this.created = false;

View File

@ -188,7 +188,7 @@ public final class EngineConfig {
/**
* Returns a thread-pool mainly used to get estimated time stamps from
* {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
* {@link org.elasticsearch.threadpool.ThreadPool#relativeTimeInMillis()} and to schedule
* async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool
*/
public ThreadPool getThreadPool() {

View File

@ -147,7 +147,7 @@ public class InternalEngine extends Engine {
EngineMergeScheduler scheduler = null;
boolean success = false;
try {
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
throttle = new IndexThrottle();
@ -446,7 +446,7 @@ public class InternalEngine extends Engine {
private long checkDeletedAndGCed(VersionValue versionValue) {
long currentVersion;
if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
@ -478,6 +478,20 @@ public class InternalEngine extends Engine {
return false;
}
private boolean assertVersionType(final Engine.Operation operation) {
if (operation.origin() == Operation.Origin.REPLICA ||
operation.origin() == Operation.Origin.PEER_RECOVERY ||
operation.origin() == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
// ensure that replica operation has expected version type for replication
// ensure that versionTypeForReplicationAndRecovery is idempotent
assert operation.versionType() == operation.versionType().versionTypeForReplicationAndRecovery()
: "unexpected version type in request from [" + operation.origin().name() + "] " +
"found [" + operation.versionType().name() + "] " +
"expected [" + operation.versionType().versionTypeForReplicationAndRecovery().name() + "]";
}
return true;
}
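
assertVersionType follows the assert-helper idiom: the method always returns true so it can sit inside an assert statement, which means the whole check is skipped at runtime unless assertions (-ea) are enabled. A minimal sketch of the idiom, not Elasticsearch code:

final class AssertIdiom {
    private static boolean assertPositive(long value) {
        assert value > 0 : "expected a positive value but got [" + value + "]";
        return true;
    }

    static void accept(long value) {
        assert assertPositive(value); // no-op in production, full check under -ea
    }
}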
private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
// legacy support
@ -499,6 +513,7 @@ public class InternalEngine extends Engine {
try (ReleasableLock releasableLock = readLock.acquire()) {
ensureOpen();
assert assertSequenceNumber(index.origin(), index.seqNo());
assert assertVersionType(index);
final Translog.Location location;
long seqNo = index.seqNo();
try (Releasable ignored = acquireLock(index.uid());
@ -692,6 +707,7 @@ public class InternalEngine extends Engine {
public DeleteResult delete(Delete delete) throws IOException {
DeleteResult result;
try (ReleasableLock ignored = readLock.acquire()) {
assert assertVersionType(delete);
ensureOpen();
// NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
result = innerDelete(delete);
@ -710,7 +726,7 @@ public class InternalEngine extends Engine {
private void maybePruneDeletedTombstones() {
// It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it
// every 1/4 of gcDeletesInMillis:
if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
pruneDeletedTombstones();
}
}
@ -756,7 +772,7 @@ public class InternalEngine extends Engine {
deleteResult = new DeleteResult(updatedVersion, seqNo, found);
versionMap.putUnderLock(delete.uid().bytes(),
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().relativeTimeInMillis()));
}
if (!deleteResult.hasFailure()) {
location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
@ -1031,7 +1047,7 @@ public class InternalEngine extends Engine {
}
private void pruneDeletedTombstones() {
long timeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
// TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...

View File

@ -162,7 +162,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
}
public static class GeoPointFieldType extends MappedFieldType {
GeoPointFieldType() {
public GeoPointFieldType() {
}
GeoPointFieldType(GeoPointFieldType ref) {

View File

@ -23,7 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
@ -114,12 +114,12 @@ public class IdFieldMapper extends MetadataFieldMapper {
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
return new TermsQuery(UidFieldMapper.NAME, uids);
return new TermInSetQuery(UidFieldMapper.NAME, uids);
}
@Override
public Query termsQuery(List values, @Nullable QueryShardContext context) {
return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
return new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
}
}

View File

@ -124,7 +124,7 @@ public class IpFieldMapper extends FieldMapper {
public static final class IpFieldType extends MappedFieldType {
IpFieldType() {
public IpFieldType() {
super();
setTokenized(false);
setHasDocValues(true);

View File

@ -22,7 +22,7 @@ package org.elasticsearch.index.mapper;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
@ -53,7 +53,7 @@ public abstract class StringFieldType extends TermBasedFieldType {
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
return new TermsQuery(name(), bytesRefs);
return new TermInSetQuery(name(), bytesRefs);
}
@Override

View File

@ -22,9 +22,9 @@ package org.elasticsearch.index.mapper;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
@ -66,7 +66,7 @@ abstract class TermBasedFieldType extends MappedFieldType {
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
return new TermsQuery(name(), bytesRefs);
return new TermInSetQuery(name(), bytesRefs);
}
}

View File

@ -26,13 +26,13 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.Lucene;
@ -172,7 +172,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
* Specialization for a disjunction over many _type
*/
public static class TypesQuery extends Query {
// Same threshold as TermsQuery
// Same threshold as TermInSetQuery
private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
private final BytesRef[] types;
@ -220,7 +220,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}
return new ConstantScoreQuery(bq.build());
}
return new TermsQuery(CONTENT_TYPE, types);
return new TermInSetQuery(CONTENT_TYPE, types);
}
@Override

View File

@ -19,8 +19,8 @@
package org.elasticsearch.index.query;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
@ -175,7 +175,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
Collections.addAll(typesForQuery, types);
}
query = new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
query = new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
}
return query;
}
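
The mechanical migration running through the mappers and query builders above: org.apache.lucene.queries.TermsQuery becomes org.apache.lucene.search.TermInSetQuery, with the same set-membership, constant-score semantics. A hedged sketch with placeholder terms:

import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;

static Query uidsQuery() {
    // before: new TermsQuery(UidFieldMapper.NAME, uids)
    // after:  same field and terms, new class from Lucene core
    return new TermInSetQuery("_uid", new BytesRef("type#1"), new BytesRef("type#2"));
}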

View File

@ -21,10 +21,10 @@ package org.elasticsearch.index.query;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.Fields;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
@ -1165,7 +1165,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
uids.add(createUidAsBytes(item.type(), item.id()));
}
if (!uids.isEmpty()) {
TermsQuery query = new TermsQuery(UidFieldMapper.NAME, uids.toArray(new BytesRef[uids.size()]));
TermInSetQuery query = new TermInSetQuery(UidFieldMapper.NAME, uids.toArray(new BytesRef[uids.size()]));
boolQuery.add(query, BooleanClause.Occur.MUST_NOT);
}
}

Some files were not shown because too many files have changed in this diff.