Merge remote-tracking branch 'es/master' into ccr
This commit is contained in:
commit
f50a4ca33f
|
@ -21,167 +21,172 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
define_opts = {
|
||||
autostart: false
|
||||
}.freeze
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
config.vm.define "ubuntu-1404" do |config|
|
||||
config.vm.box = "elastic/ubuntu-14.04-x86_64"
|
||||
ubuntu_common config
|
||||
|
||||
config.vm.provider 'virtualbox' do |vbox|
|
||||
# Give the box more memory and cpu because our tests are beasts!
|
||||
vbox.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192)
|
||||
vbox.cpus = Integer(ENV['VAGRANT_CPUS'] || 4)
|
||||
end
|
||||
config.vm.define "ubuntu-1604" do |config|
|
||||
config.vm.box = "elastic/ubuntu-16.04-x86_64"
|
||||
ubuntu_common config, extra: <<-SHELL
|
||||
# Install Jayatana so we can work around it being present.
|
||||
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
|
||||
SHELL
|
||||
|
||||
# Switch the default share for the project root from /vagrant to
|
||||
# /elasticsearch because /vagrant is confusing when there is a project inside
|
||||
# the elasticsearch project called vagrant....
|
||||
config.vm.synced_folder '.', '/vagrant', disabled: true
|
||||
config.vm.synced_folder '.', '/elasticsearch'
|
||||
|
||||
# Expose project directory. Note that VAGRANT_CWD may not be the same as Dir.pwd
|
||||
PROJECT_DIR = ENV['VAGRANT_PROJECT_DIR'] || Dir.pwd
|
||||
config.vm.synced_folder PROJECT_DIR, '/project'
|
||||
|
||||
'ubuntu-1404'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/ubuntu-14.04-x86_64'
|
||||
deb_common config, box
|
||||
end
|
||||
end
|
||||
'ubuntu-1604'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/ubuntu-16.04-x86_64'
|
||||
deb_common config, box, extra: <<-SHELL
|
||||
# Install Jayatana so we can work around it being present.
|
||||
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
|
||||
SHELL
|
||||
end
|
||||
end
|
||||
# Wheezy's backports don't contain Openjdk 8 and the backflips
|
||||
# required to get the sun jdk on there just aren't worth it. We have
|
||||
# jessie and stretch for testing debian and it works fine.
|
||||
config.vm.define "debian-8" do |config|
|
||||
config.vm.box = "elastic/debian-8-x86_64"
|
||||
deb_common config
|
||||
end
|
||||
config.vm.define "debian-9" do |config|
|
||||
config.vm.box = "elastic/debian-9-x86_64"
|
||||
deb_common config
|
||||
end
|
||||
config.vm.define "centos-6" do |config|
|
||||
config.vm.box = "elastic/centos-6-x86_64"
|
||||
rpm_common config
|
||||
end
|
||||
config.vm.define "centos-7" do |config|
|
||||
config.vm.box = "elastic/centos-7-x86_64"
|
||||
rpm_common config
|
||||
end
|
||||
config.vm.define "oel-6" do |config|
|
||||
config.vm.box = "elastic/oraclelinux-6-x86_64"
|
||||
rpm_common config
|
||||
end
|
||||
config.vm.define "oel-7" do |config|
|
||||
config.vm.box = "elastic/oraclelinux-7-x86_64"
|
||||
rpm_common config
|
||||
end
|
||||
config.vm.define "fedora-26" do |config|
|
||||
config.vm.box = "elastic/fedora-26-x86_64"
|
||||
dnf_common config
|
||||
end
|
||||
config.vm.define "fedora-27" do |config|
|
||||
config.vm.box = "elastic/fedora-27-x86_64"
|
||||
dnf_common config
|
||||
end
|
||||
config.vm.define "opensuse-42" do |config|
|
||||
config.vm.box = "elastic/opensuse-42-x86_64"
|
||||
opensuse_common config
|
||||
end
|
||||
config.vm.define "sles-12" do |config|
|
||||
config.vm.box = "elastic/sles-12-x86_64"
|
||||
sles_common config
|
||||
end
|
||||
# Switch the default share for the project root from /vagrant to
|
||||
# /elasticsearch because /vagrant is confusing when there is a project inside
|
||||
# the elasticsearch project called vagrant....
|
||||
config.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
config.vm.synced_folder ".", "/elasticsearch"
|
||||
# Expose project directory
|
||||
PROJECT_DIR = ENV['VAGRANT_PROJECT_DIR'] || Dir.pwd
|
||||
config.vm.synced_folder PROJECT_DIR, "/project"
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
# Give the boxes 3GB because Elasticsearch defaults to using 2GB
|
||||
v.memory = 3072
|
||||
end
|
||||
if Vagrant.has_plugin?("vagrant-cachier")
|
||||
config.cache.scope = :box
|
||||
end
|
||||
config.vm.defined_vms.each do |name, config|
|
||||
config.options[:autostart] = false
|
||||
set_prompt = lambda do |config|
|
||||
# Sets up a consistent prompt for all users. Or tries to. The VM might
|
||||
# contain overrides for root and vagrant but this attempts to work around
|
||||
# them by re-source-ing the standard prompt file.
|
||||
config.vm.provision "prompt", type: "shell", inline: <<-SHELL
|
||||
cat \<\<PROMPT > /etc/profile.d/elasticsearch_prompt.sh
|
||||
export PS1='#{name}:\\w$ '
|
||||
PROMPT
|
||||
grep 'source /etc/profile.d/elasticsearch_prompt.sh' ~/.bashrc |
|
||||
cat \<\<SOURCE_PROMPT >> ~/.bashrc
|
||||
# Replace the standard prompt with a consistent one
|
||||
source /etc/profile.d/elasticsearch_prompt.sh
|
||||
SOURCE_PROMPT
|
||||
grep 'source /etc/profile.d/elasticsearch_prompt.sh' ~vagrant/.bashrc |
|
||||
cat \<\<SOURCE_PROMPT >> ~vagrant/.bashrc
|
||||
# Replace the standard prompt with a consistent one
|
||||
source /etc/profile.d/elasticsearch_prompt.sh
|
||||
SOURCE_PROMPT
|
||||
SHELL
|
||||
# Creates a file to mark the machine as created by vagrant. Tests check
|
||||
# for this file and refuse to run if it is not present so that they can't
|
||||
# be run unexpectedly.
|
||||
config.vm.provision "markerfile", type: "shell", inline: <<-SHELL
|
||||
touch /etc/is_vagrant_vm
|
||||
SHELL
|
||||
'debian-8'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/debian-8-x86_64'
|
||||
deb_common config, box
|
||||
end
|
||||
end
|
||||
'debian-9'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/debian-9-x86_64'
|
||||
deb_common config, box
|
||||
end
|
||||
end
|
||||
'centos-6'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/centos-6-x86_64'
|
||||
rpm_common config, box
|
||||
end
|
||||
end
|
||||
'centos-7'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/centos-7-x86_64'
|
||||
rpm_common config, box
|
||||
end
|
||||
end
|
||||
'oel-6'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/oraclelinux-6-x86_64'
|
||||
rpm_common config, box
|
||||
end
|
||||
end
|
||||
'oel-7'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/oraclelinux-7-x86_64'
|
||||
rpm_common config, box
|
||||
end
|
||||
end
|
||||
'fedora-26'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/fedora-26-x86_64'
|
||||
dnf_common config, box
|
||||
end
|
||||
end
|
||||
'fedora-27'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/fedora-27-x86_64'
|
||||
dnf_common config, box
|
||||
end
|
||||
end
|
||||
'opensuse-42'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/opensuse-42-x86_64'
|
||||
suse_common config, box
|
||||
end
|
||||
end
|
||||
'sles-12'.tap do |box|
|
||||
config.vm.define box, define_opts do |config|
|
||||
config.vm.box = 'elastic/sles-12-x86_64'
|
||||
sles_common config, box
|
||||
end
|
||||
config.config_procs.push ['2', set_prompt]
|
||||
end
|
||||
end
|
||||
|
||||
def ubuntu_common(config, extra: '')
|
||||
deb_common config, extra: extra
|
||||
end
|
||||
|
||||
def deb_common(config, extra: '')
|
||||
def deb_common(config, name, extra: '')
|
||||
# http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
|
||||
config.vm.provision "fix-no-tty", type: "shell" do |s|
|
||||
config.vm.provision 'fix-no-tty', type: 'shell' do |s|
|
||||
s.privileged = false
|
||||
s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile"
|
||||
end
|
||||
provision(config,
|
||||
update_command: "apt-get update",
|
||||
update_tracking_file: "/var/cache/apt/archives/last_update",
|
||||
install_command: "apt-get install -y",
|
||||
extra: extra)
|
||||
linux_common(
|
||||
config,
|
||||
name,
|
||||
update_command: 'apt-get update',
|
||||
update_tracking_file: '/var/cache/apt/archives/last_update',
|
||||
install_command: 'apt-get install -y',
|
||||
extra: extra
|
||||
)
|
||||
end
|
||||
|
||||
def rpm_common(config)
|
||||
provision(config,
|
||||
update_command: "yum check-update",
|
||||
update_tracking_file: "/var/cache/yum/last_update",
|
||||
install_command: "yum install -y")
|
||||
def rpm_common(config, name)
|
||||
linux_common(
|
||||
config,
|
||||
name,
|
||||
update_command: 'yum check-update',
|
||||
update_tracking_file: '/var/cache/yum/last_update',
|
||||
install_command: 'yum install -y'
|
||||
)
|
||||
end
|
||||
|
||||
def dnf_common(config)
|
||||
provision(config,
|
||||
update_command: "dnf check-update",
|
||||
update_tracking_file: "/var/cache/dnf/last_update",
|
||||
install_command: "dnf install -y",
|
||||
install_command_retries: 5)
|
||||
if Vagrant.has_plugin?("vagrant-cachier")
|
||||
# Autodetect doesn't work....
|
||||
def dnf_common(config, name)
|
||||
# Autodetect doesn't work....
|
||||
if Vagrant.has_plugin?('vagrant-cachier')
|
||||
config.cache.auto_detect = false
|
||||
config.cache.enable :generic, { :cache_dir => "/var/cache/dnf" }
|
||||
config.cache.enable :generic, { :cache_dir => '/var/cache/dnf' }
|
||||
end
|
||||
linux_common(
|
||||
config,
|
||||
name,
|
||||
update_command: 'dnf check-update',
|
||||
update_tracking_file: '/var/cache/dnf/last_update',
|
||||
install_command: 'dnf install -y',
|
||||
install_command_retries: 5
|
||||
)
|
||||
end
|
||||
|
||||
def opensuse_common(config)
|
||||
suse_common config, ''
|
||||
def suse_common(config, name, extra: '')
|
||||
linux_common(
|
||||
config,
|
||||
name,
|
||||
update_command: 'zypper --non-interactive list-updates',
|
||||
update_tracking_file: '/var/cache/zypp/packages/last_update',
|
||||
install_command: 'zypper --non-interactive --quiet install --no-recommends',
|
||||
extra: extra
|
||||
)
|
||||
end
|
||||
|
||||
def suse_common(config, extra)
|
||||
provision(config,
|
||||
update_command: "zypper --non-interactive list-updates",
|
||||
update_tracking_file: "/var/cache/zypp/packages/last_update",
|
||||
install_command: "zypper --non-interactive --quiet install --no-recommends",
|
||||
extra: extra)
|
||||
end
|
||||
|
||||
def sles_common(config)
|
||||
def sles_common(config, name)
|
||||
extra = <<-SHELL
|
||||
zypper rr systemsmanagement_puppet puppetlabs-pc1
|
||||
zypper --non-interactive install git-core
|
||||
SHELL
|
||||
suse_common config, extra
|
||||
SHELL
|
||||
suse_common config, name, extra: extra
|
||||
end
|
||||
|
||||
# Register the main box provisioning script.
|
||||
# Configuration needed for all linux boxes
|
||||
# @param config Vagrant's config object. Required.
|
||||
# @param name [String] The box name. Required.
|
||||
# @param update_command [String] The command used to update the package
|
||||
# manager. Required. Think `apt-get update`.
|
||||
# @param update_tracking_file [String] The location of the file tracking the
|
||||
|
@ -189,24 +194,76 @@ end
|
|||
# is cached by vagrant-cachier.
|
||||
# @param install_command [String] The command used to install a package.
|
||||
# Required. Think `apt-get install #{package}`.
|
||||
# @param extra [String] Extra provisioning commands run before anything else.
|
||||
# Optional. Used for things like setting up the ppa for Java 8.
|
||||
def provision(config,
|
||||
update_command: 'required',
|
||||
update_tracking_file: 'required',
|
||||
install_command: 'required',
|
||||
install_command_retries: 0,
|
||||
extra: '')
|
||||
# Vagrant run ruby 2.0.0 which doesn't have required named parameters....
|
||||
raise ArgumentError.new('update_command is required') if update_command == 'required'
|
||||
raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
|
||||
raise ArgumentError.new('install_command is required') if install_command == 'required'
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
# Give the box more memory and cpu because our tests are beasts!
|
||||
v.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192)
|
||||
v.cpus = Integer(ENV['VAGRANT_CPUS'] || 4)
|
||||
# @param install_command_retries [Integer] Number of times to retry
|
||||
# a failed install command
|
||||
# @param extra [String] Additional script to run before installing
|
||||
# dependencies
|
||||
#
|
||||
def linux_common(config,
|
||||
name,
|
||||
update_command: 'required',
|
||||
update_tracking_file: 'required',
|
||||
install_command: 'required',
|
||||
install_command_retries: 0,
|
||||
extra: '')
|
||||
|
||||
raise ArgumentError, 'update_command is required' if update_command == 'required'
|
||||
raise ArgumentError, 'update_tracking_file is required' if update_tracking_file == 'required'
|
||||
raise ArgumentError, 'install_command is required' if install_command == 'required'
|
||||
|
||||
if Vagrant.has_plugin?('vagrant-cachier')
|
||||
config.cache.scope = :box
|
||||
end
|
||||
config.vm.provision "dependencies", type: "shell", inline: <<-SHELL
|
||||
|
||||
config.vm.provision 'markerfile', type: 'shell', inline: <<-SHELL
|
||||
touch /etc/is_vagrant_vm
|
||||
SHELL
|
||||
|
||||
# This prevents leftovers from previous tests using the
|
||||
# same VM from messing up the current test
|
||||
config.vm.provision 'clean es installs in tmp', run: 'always', type: 'shell', inline: <<-SHELL
|
||||
rm -rf /tmp/elasticsearch*
|
||||
SHELL
|
||||
|
||||
sh_set_prompt config, name
|
||||
sh_install_deps(
|
||||
config,
|
||||
update_command,
|
||||
update_tracking_file,
|
||||
install_command,
|
||||
install_command_retries,
|
||||
extra
|
||||
)
|
||||
end
|
||||
|
||||
# Sets up a consistent prompt for all users. Or tries to. The VM might
|
||||
# contain overrides for root and vagrant but this attempts to work around
|
||||
# them by re-source-ing the standard prompt file.
|
||||
def sh_set_prompt(config, name)
|
||||
config.vm.provision 'set prompt', type: 'shell', inline: <<-SHELL
|
||||
cat \<\<PROMPT > /etc/profile.d/elasticsearch_prompt.sh
|
||||
export PS1='#{name}:\\w$ '
|
||||
PROMPT
|
||||
grep 'source /etc/profile.d/elasticsearch_prompt.sh' ~/.bashrc |
|
||||
cat \<\<SOURCE_PROMPT >> ~/.bashrc
|
||||
# Replace the standard prompt with a consistent one
|
||||
source /etc/profile.d/elasticsearch_prompt.sh
|
||||
SOURCE_PROMPT
|
||||
grep 'source /etc/profile.d/elasticsearch_prompt.sh' ~vagrant/.bashrc |
|
||||
cat \<\<SOURCE_PROMPT >> ~vagrant/.bashrc
|
||||
# Replace the standard prompt with a consistent one
|
||||
source /etc/profile.d/elasticsearch_prompt.sh
|
||||
SOURCE_PROMPT
|
||||
SHELL
|
||||
end
|
||||
|
||||
def sh_install_deps(config,
|
||||
update_command,
|
||||
update_tracking_file,
|
||||
install_command,
|
||||
install_command_retries,
|
||||
extra)
|
||||
config.vm.provision 'install dependencies', type: 'shell', inline: <<-SHELL
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
|
@ -240,9 +297,9 @@ def provision(config,
|
|||
echo "==> Installing $1"
|
||||
if [ #{install_command_retries} -eq 0 ]
|
||||
then
|
||||
#{install_command} $1
|
||||
#{install_command} $1
|
||||
else
|
||||
retry_installcommand $1 #{install_command_retries}
|
||||
retry_installcommand $1 #{install_command_retries}
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -253,12 +310,13 @@ def provision(config,
|
|||
#{extra}
|
||||
|
||||
installed java || {
|
||||
echo "==> Java is not installed on vagrant box ${config.vm.box}"
|
||||
echo "==> Java is not installed"
|
||||
return 1
|
||||
}
|
||||
ensure tar
|
||||
ensure curl
|
||||
ensure unzip
|
||||
ensure rsync
|
||||
|
||||
installed bats || {
|
||||
# Bats lives in a git repository....
|
||||
|
@ -292,9 +350,4 @@ Defaults env_keep += "BATS_ARCHIVES"
|
|||
SUDOERS_VARS
|
||||
chmod 0440 /etc/sudoers.d/elasticsearch_vars
|
||||
SHELL
|
||||
# This prevents leftovers from previous tests using the
|
||||
# same VM from messing up the current test
|
||||
config.vm.provision "clean_tmp", run: "always", type: "shell", inline: <<-SHELL
|
||||
rm -rf /tmp/elasticsearch*
|
||||
SHELL
|
||||
end
|
||||
|
|
|
@ -185,6 +185,7 @@ subprojects {
|
|||
"org.elasticsearch:elasticsearch-cli:${version}": ':server:cli',
|
||||
"org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core',
|
||||
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio',
|
||||
"org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm',
|
||||
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
|
||||
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',
|
||||
"org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level',
|
||||
|
|
|
@ -62,6 +62,9 @@ class RandomizedTestingPlugin implements Plugin<Project> {
|
|||
RandomizedTestingTask newTestTask = tasks.create(properties)
|
||||
newTestTask.classpath = oldTestTask.classpath
|
||||
newTestTask.testClassesDir = oldTestTask.project.sourceSets.test.output.classesDir
|
||||
// since gradle 4.5, tasks immutable dependencies are "hidden" (do not show up in dependsOn)
|
||||
// so we must explicitly add a dependency on generating the test classpath
|
||||
newTestTask.dependsOn('testClasses')
|
||||
|
||||
// hack so check task depends on custom test
|
||||
Task checkTask = tasks.findByPath('check')
|
||||
|
|
|
@ -567,6 +567,9 @@ class BuildPlugin implements Plugin<Project> {
|
|||
File heapdumpDir = new File(project.buildDir, 'heapdump')
|
||||
heapdumpDir.mkdirs()
|
||||
jvmArg '-XX:HeapDumpPath=' + heapdumpDir
|
||||
if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) {
|
||||
jvmArg '--illegal-access=warn'
|
||||
}
|
||||
argLine System.getProperty('tests.jvm.argline')
|
||||
|
||||
// we use './temp' since this is per JVM and tests are forbidden from writing to CWD
|
||||
|
|
|
@ -123,6 +123,14 @@ class ClusterConfiguration {
|
|||
return tmpFile.exists()
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum number of seconds to wait for nodes to complete startup, which includes writing
|
||||
* the ports files for the transports and the pid file. This wait time occurs before the wait
|
||||
* condition is executed.
|
||||
*/
|
||||
@Input
|
||||
int nodeStartupWaitSeconds = 30
|
||||
|
||||
public ClusterConfiguration(Project project) {
|
||||
this.project = project
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.elasticsearch.gradle.LoggedExec
|
|||
import org.elasticsearch.gradle.Version
|
||||
import org.elasticsearch.gradle.VersionProperties
|
||||
import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
|
||||
import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension
|
||||
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
|
||||
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension
|
||||
import org.gradle.api.AntBuilder
|
||||
|
@ -120,7 +119,7 @@ class ClusterFormationTasks {
|
|||
startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, nodes.get(0)))
|
||||
}
|
||||
|
||||
Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks)
|
||||
Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks, config.nodeStartupWaitSeconds)
|
||||
runner.dependsOn(wait)
|
||||
|
||||
return nodes
|
||||
|
@ -577,10 +576,10 @@ class ClusterFormationTasks {
|
|||
return start
|
||||
}
|
||||
|
||||
static Task configureWaitTask(String name, Project project, List<NodeInfo> nodes, List<Task> startTasks) {
|
||||
static Task configureWaitTask(String name, Project project, List<NodeInfo> nodes, List<Task> startTasks, int waitSeconds) {
|
||||
Task wait = project.tasks.create(name: name, dependsOn: startTasks)
|
||||
wait.doLast {
|
||||
ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") {
|
||||
ant.waitfor(maxwait: "${waitSeconds}", maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") {
|
||||
or {
|
||||
for (NodeInfo node : nodes) {
|
||||
resourceexists {
|
||||
|
@ -610,6 +609,17 @@ class ClusterFormationTasks {
|
|||
waitFailed(project, nodes, logger, 'Failed to start elasticsearch')
|
||||
}
|
||||
|
||||
// make sure all files exist otherwise we haven't fully started up
|
||||
boolean missingFile = false
|
||||
for (NodeInfo node : nodes) {
|
||||
missingFile |= node.pidFile.exists() == false
|
||||
missingFile |= node.httpPortsFile.exists() == false
|
||||
missingFile |= node.transportPortsFile.exists() == false
|
||||
}
|
||||
if (missingFile) {
|
||||
waitFailed(project, nodes, logger, 'Elasticsearch did not complete startup in time allotted')
|
||||
}
|
||||
|
||||
// go through each node checking the wait condition
|
||||
for (NodeInfo node : nodes) {
|
||||
// first bind node info to the closure, then pass to the ant runner so we can get good logging
|
||||
|
|
|
@ -162,7 +162,7 @@ public class RestIntegTestTask extends DefaultTask {
|
|||
if (line.startsWith("[")) {
|
||||
inExcerpt = false // clear with the next log message
|
||||
}
|
||||
if (line =~ /(\[WARN\])|(\[ERROR\])/) {
|
||||
if (line =~ /(\[WARN *\])|(\[ERROR *\])/) {
|
||||
inExcerpt = true // show warnings and errors
|
||||
}
|
||||
if (inStartup || inExcerpt) {
|
||||
|
|
|
@ -417,7 +417,6 @@
|
|||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotsService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPool.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]tribe[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQueryTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]VersionTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]RejectionActionIT.java" checks="LineLength" />
|
||||
|
|
|
@ -82,3 +82,56 @@ org.joda.time.DateTime#<init>(int, int, int, int, int, int)
|
|||
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#now()
|
||||
org.joda.time.DateTimeZone#getDefault()
|
||||
|
||||
@defaultMessage Local times may be ambiguous or nonexistent in a specific time zones. Use ZoneRules#getValidOffsets() instead.
|
||||
java.time.LocalDateTime#atZone(java.time.ZoneId)
|
||||
java.time.ZonedDateTime#of(int, int, int, int, int, int, int, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#of(java.time.LocalDate, java.time.LocalTime, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#of(java.time.LocalDateTime, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#truncatedTo(java.time.temporal.TemporalUnit)
|
||||
java.time.ZonedDateTime#of(int, int, int, int, int, int, int, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#of(java.time.LocalDate, java.time.LocalTime, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#of(java.time.LocalDateTime, java.time.ZoneId)
|
||||
java.time.ZonedDateTime#ofLocal(java.time.LocalDateTime, java.time.ZoneId, java.time.ZoneOffset)
|
||||
java.time.OffsetDateTime#atZoneSimilarLocal(java.time.ZoneId)
|
||||
java.time.zone.ZoneRules#getOffset(java.time.LocalDateTime)
|
||||
|
||||
@defaultMessage Manipulation of an OffsetDateTime may yield a time that is not valid in the desired time zone. Use ZonedDateTime instead.
|
||||
java.time.OffsetDateTime#minus(long, java.time.temporal.TemporalUnit)
|
||||
java.time.OffsetDateTime#minus(long, java.time.temporal.TemporalUnit)
|
||||
java.time.OffsetDateTime#minus(java.time.temporal.TemporalAmount)
|
||||
java.time.OffsetDateTime#minusDays(long)
|
||||
java.time.OffsetDateTime#minusHours(long)
|
||||
java.time.OffsetDateTime#minusMinutes(long)
|
||||
java.time.OffsetDateTime#minusMonths(long)
|
||||
java.time.OffsetDateTime#minusNanos(long)
|
||||
java.time.OffsetDateTime#minusSeconds(long)
|
||||
java.time.OffsetDateTime#minusWeeks(long)
|
||||
java.time.OffsetDateTime#minusYears(long)
|
||||
java.time.OffsetDateTime#plus(long, java.time.temporal.TemporalUnit)
|
||||
java.time.OffsetDateTime#plus(java.time.temporal.TemporalAmount)
|
||||
java.time.OffsetDateTime#plusDays(long)
|
||||
java.time.OffsetDateTime#plusHours(long)
|
||||
java.time.OffsetDateTime#plusMinutes(long)
|
||||
java.time.OffsetDateTime#plusMonths(long)
|
||||
java.time.OffsetDateTime#plusNanos(long)
|
||||
java.time.OffsetDateTime#plusSeconds(long)
|
||||
java.time.OffsetDateTime#plusWeeks(long)
|
||||
java.time.OffsetDateTime#plusYears(long)
|
||||
java.time.OffsetDateTime#with(java.time.temporal.TemporalAdjuster)
|
||||
java.time.OffsetDateTime#with(java.time.temporal.TemporalField, long)
|
||||
java.time.OffsetDateTime#withDayOfMonth(int)
|
||||
java.time.OffsetDateTime#withDayOfYear(int)
|
||||
java.time.OffsetDateTime#withHour(int)
|
||||
java.time.OffsetDateTime#withMinute(int)
|
||||
java.time.OffsetDateTime#withMonth(int)
|
||||
java.time.OffsetDateTime#withNano(int)
|
||||
java.time.OffsetDateTime#withOffsetSameInstant(java.time.ZoneOffset)
|
||||
java.time.OffsetDateTime#withOffsetSameLocal(java.time.ZoneOffset)
|
||||
java.time.OffsetDateTime#withSecond(int)
|
||||
java.time.OffsetDateTime#withYear(int)
|
||||
|
||||
@defaultMessage Daylight saving is not the only reason for a change in timezone offset.
|
||||
java.time.zone.ZoneRules#getStandardOffset(java.time.Instant)
|
||||
java.time.zone.ZoneRules#getDaylightSavings(java.time.Instant)
|
||||
java.time.zone.ZoneRules#isDaylightSavings(java.time.Instant)
|
||||
|
|
|
@ -21,20 +21,28 @@ package org.elasticsearch.client;
|
|||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
|
||||
/**
|
||||
* A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
|
||||
* <p>
|
||||
|
@ -55,7 +63,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -66,7 +74,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
|
||||
listener, Collections.emptySet(), headers);
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -77,7 +85,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -88,7 +96,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public void createAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
|
||||
listener, Collections.emptySet(), headers);
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -99,7 +107,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -111,7 +119,32 @@ public final class IndicesClient {
|
|||
public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener<PutMappingResponse> listener,
|
||||
Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
|
||||
listener, Collections.emptySet(), headers);
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates aliases using the Index Aliases API
|
||||
* <p>
|
||||
* See <a href=
|
||||
* "https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
* Index Aliases API on elastic.co</a>
|
||||
*/
|
||||
public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases,
|
||||
IndicesAliasesResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates aliases using the Index Aliases API
|
||||
* <p>
|
||||
* See <a href=
|
||||
* "https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
* Index Aliases API on elastic.co</a>
|
||||
*/
|
||||
public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener<IndicesAliasesResponse> listener,
|
||||
Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, Request::updateAliases,
|
||||
IndicesAliasesResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -122,7 +155,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -133,7 +166,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public void openAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
|
||||
listener, Collections.emptySet(), headers);
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -144,7 +177,7 @@ public final class IndicesClient {
|
|||
*/
|
||||
public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -155,6 +188,105 @@ public final class IndicesClient {
|
|||
*/
|
||||
public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener<CloseIndexResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
|
||||
listener, Collections.emptySet(), headers);
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if one or more aliases exist using the Aliases Exist API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
* Indices Aliases API on elastic.co</a>
|
||||
*/
|
||||
public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks if one or more aliases exist using the Aliases Exist API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
* Indices Aliases API on elastic.co</a>
|
||||
*/
|
||||
public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener<Boolean> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the index (indices) exists or not.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
|
||||
* Indices Exists API on elastic.co</a>
|
||||
*/
|
||||
public boolean exists(GetIndexRequest request, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequest(
|
||||
request,
|
||||
Request::indicesExist,
|
||||
RestHighLevelClient::convertExistsResponse,
|
||||
Collections.emptySet(),
|
||||
headers
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks if the index (indices) exists or not.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
|
||||
* Indices Exists API on elastic.co</a>
|
||||
*/
|
||||
public void existsAsync(GetIndexRequest request, ActionListener<Boolean> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsync(
|
||||
request,
|
||||
Request::indicesExist,
|
||||
RestHighLevelClient::convertExistsResponse,
|
||||
listener,
|
||||
Collections.emptySet(),
|
||||
headers
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Shrinks an index using the Shrink Index API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
|
||||
* Shrink Index API on elastic.co</a>
|
||||
*/
|
||||
public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously shrinks an index using the Shrink index API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
|
||||
* Shrink Index API on elastic.co</a>
|
||||
*/
|
||||
public void shrinkAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Splits an index using the Split Index API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
|
||||
* Shrink Index API on elastic.co</a>
|
||||
*/
|
||||
public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously splits an index using the Split Index API
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
|
||||
* Split Index API on elastic.co</a>
|
||||
*/
|
||||
public void splitAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,11 +29,16 @@ import org.apache.http.entity.ByteArrayEntity;
|
|||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
|
@ -61,6 +66,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.rankeval.RankEvalRequest;
|
||||
import org.elasticsearch.rest.action.search.RestSearchAction;
|
||||
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
|
||||
|
||||
|
@ -69,6 +75,7 @@ import java.io.IOException;
|
|||
import java.nio.charset.Charset;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
@ -132,7 +139,7 @@ public final class Request {
|
|||
}
|
||||
|
||||
static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) {
|
||||
String endpoint = endpoint(deleteIndexRequest.indices(), Strings.EMPTY_ARRAY, "");
|
||||
String endpoint = endpoint(deleteIndexRequest.indices());
|
||||
|
||||
Params parameters = Params.builder();
|
||||
parameters.withTimeout(deleteIndexRequest.timeout());
|
||||
|
@ -143,7 +150,7 @@ public final class Request {
|
|||
}
|
||||
|
||||
static Request openIndex(OpenIndexRequest openIndexRequest) {
|
||||
String endpoint = endpoint(openIndexRequest.indices(), Strings.EMPTY_ARRAY, "_open");
|
||||
String endpoint = endpoint(openIndexRequest.indices(), "_open");
|
||||
|
||||
Params parameters = Params.builder();
|
||||
|
||||
|
@ -156,7 +163,7 @@ public final class Request {
|
|||
}
|
||||
|
||||
static Request closeIndex(CloseIndexRequest closeIndexRequest) {
|
||||
String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close");
|
||||
String endpoint = endpoint(closeIndexRequest.indices(), "_close");
|
||||
|
||||
Params parameters = Params.builder();
|
||||
|
||||
|
@ -168,7 +175,7 @@ public final class Request {
|
|||
}
|
||||
|
||||
static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
|
||||
String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");
|
||||
String endpoint = endpoint(createIndexRequest.indices());
|
||||
|
||||
Params parameters = Params.builder();
|
||||
parameters.withTimeout(createIndexRequest.timeout());
|
||||
|
@ -179,6 +186,15 @@ public final class Request {
|
|||
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
|
||||
}
|
||||
|
||||
static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException {
|
||||
Params parameters = Params.builder();
|
||||
parameters.withTimeout(indicesAliasesRequest.timeout());
|
||||
parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout());
|
||||
|
||||
HttpEntity entity = createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_aliases", parameters.getParams(), entity);
|
||||
}
|
||||
|
||||
static Request putMapping(PutMappingRequest putMappingRequest) throws IOException {
|
||||
// The concreteIndex is an internal concept, not applicable to requests made over the REST API.
|
||||
if (putMappingRequest.getConcreteIndex() != null) {
|
||||
|
@ -348,7 +364,7 @@ public final class Request {
|
|||
parameters.withRealtime(multiGetRequest.realtime());
|
||||
parameters.withRefresh(multiGetRequest.refresh());
|
||||
HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpGet.METHOD_NAME, "/_mget", parameters.getParams(), entity);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_mget", parameters.getParams(), entity);
|
||||
}
|
||||
|
||||
static Request index(IndexRequest indexRequest) {
|
||||
|
@ -429,6 +445,9 @@ public final class Request {
|
|||
if (searchRequest.requestCache() != null) {
|
||||
params.putParam("request_cache", Boolean.toString(searchRequest.requestCache()));
|
||||
}
|
||||
if (searchRequest.allowPartialSearchResults() != null) {
|
||||
params.putParam("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults()));
|
||||
}
|
||||
params.putParam("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize()));
|
||||
if (searchRequest.scroll() != null) {
|
||||
params.putParam("scroll", searchRequest.scroll().keepAlive());
|
||||
|
@ -437,17 +456,17 @@ public final class Request {
|
|||
if (searchRequest.source() != null) {
|
||||
entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE);
|
||||
}
|
||||
return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity);
|
||||
}
|
||||
|
||||
static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException {
|
||||
HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request("GET", "/_search/scroll", Collections.emptyMap(), entity);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity);
|
||||
}
|
||||
|
||||
static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException {
|
||||
HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity);
|
||||
return new Request(HttpDelete.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity);
|
||||
}
|
||||
|
||||
static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
|
||||
|
@ -459,7 +478,52 @@ public final class Request {
|
|||
XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
|
||||
byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
|
||||
HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type()));
|
||||
return new Request("GET", "/_msearch", params.getParams(), entity);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_msearch", params.getParams(), entity);
|
||||
}
|
||||
|
||||
static Request existsAlias(GetAliasesRequest getAliasesRequest) {
|
||||
Params params = Params.builder();
|
||||
params.withIndicesOptions(getAliasesRequest.indicesOptions());
|
||||
params.withLocal(getAliasesRequest.local());
|
||||
if (getAliasesRequest.indices().length == 0 && getAliasesRequest.aliases().length == 0) {
|
||||
throw new IllegalArgumentException("existsAlias requires at least an alias or an index");
|
||||
}
|
||||
String endpoint = endpoint(getAliasesRequest.indices(), "_alias", getAliasesRequest.aliases());
|
||||
return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
|
||||
}
|
||||
|
||||
static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
|
||||
// TODO maybe indices should be propery of RankEvalRequest and not of the spec
|
||||
List<String> indices = rankEvalRequest.getRankEvalSpec().getIndices();
|
||||
String endpoint = endpoint(indices.toArray(new String[indices.size()]), Strings.EMPTY_ARRAY, "_rank_eval");
|
||||
HttpEntity entity = null;
|
||||
entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity);
|
||||
}
|
||||
|
||||
static Request split(ResizeRequest resizeRequest) throws IOException {
|
||||
if (resizeRequest.getResizeType() != ResizeType.SPLIT) {
|
||||
throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices split request");
|
||||
}
|
||||
return resize(resizeRequest);
|
||||
}
|
||||
|
||||
static Request shrink(ResizeRequest resizeRequest) throws IOException {
|
||||
if (resizeRequest.getResizeType() != ResizeType.SHRINK) {
|
||||
throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices shrink request");
|
||||
}
|
||||
return resize(resizeRequest);
|
||||
}
|
||||
|
||||
private static Request resize(ResizeRequest resizeRequest) throws IOException {
|
||||
Params params = Params.builder();
|
||||
params.withTimeout(resizeRequest.timeout());
|
||||
params.withMasterTimeout(resizeRequest.masterNodeTimeout());
|
||||
params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
|
||||
String endpoint = buildEndpoint(resizeRequest.getSourceIndex(), "_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT),
|
||||
resizeRequest.getTargetIndexRequest().index());
|
||||
HttpEntity entity = createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPut.METHOD_NAME, endpoint, params.getParams(), entity);
|
||||
}
|
||||
|
||||
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
|
||||
|
@ -467,8 +531,28 @@ public final class Request {
|
|||
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
|
||||
}
|
||||
|
||||
static String endpoint(String index, String type, String id) {
|
||||
return buildEndpoint(index, type, id);
|
||||
}
|
||||
|
||||
static String endpoint(String index, String type, String id, String endpoint) {
|
||||
return buildEndpoint(index, type, id, endpoint);
|
||||
}
|
||||
|
||||
static String endpoint(String[] indices) {
|
||||
return buildEndpoint(String.join(",", indices));
|
||||
}
|
||||
|
||||
static String endpoint(String[] indices, String endpoint) {
|
||||
return buildEndpoint(String.join(",", indices), endpoint);
|
||||
}
|
||||
|
||||
static String endpoint(String[] indices, String[] types, String endpoint) {
|
||||
return endpoint(String.join(",", indices), String.join(",", types), endpoint);
|
||||
return buildEndpoint(String.join(",", indices), String.join(",", types), endpoint);
|
||||
}
|
||||
|
||||
static String endpoint(String[] indices, String endpoint, String[] suffixes) {
|
||||
return buildEndpoint(String.join(",", indices), endpoint, String.join(",", suffixes));
|
||||
}
|
||||
|
||||
static String endpoint(String[] indices, String endpoint, String type) {
|
||||
|
@ -476,9 +560,9 @@ public final class Request {
|
|||
}
|
||||
|
||||
/**
|
||||
* Utility method to build request's endpoint.
|
||||
* Utility method to build request's endpoint given its parts as strings
|
||||
*/
|
||||
static String endpoint(String... parts) {
|
||||
static String buildEndpoint(String... parts) {
|
||||
StringJoiner joiner = new StringJoiner("/", "/", "");
|
||||
for (String part : parts) {
|
||||
if (Strings.hasLength(part)) {
|
||||
|
@ -499,6 +583,17 @@ public final class Request {
|
|||
return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null);
|
||||
}
|
||||
|
||||
static Request indicesExist(GetIndexRequest request) {
|
||||
String endpoint = endpoint(request.indices(), Strings.EMPTY_ARRAY, "");
|
||||
Params params = Params.builder();
|
||||
params.withLocal(request.local());
|
||||
params.withHuman(request.humanReadable());
|
||||
params.withIndicesOptions(request.indicesOptions());
|
||||
params.withFlatSettings(request.flatSettings());
|
||||
params.withIncludeDefaults(request.includeDefaults());
|
||||
return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility class to build request's parameters map and centralize all parameter names.
|
||||
*/
|
||||
|
@ -633,7 +728,7 @@ public final class Request {
|
|||
if (indicesOptions.expandWildcardsOpen() == false && indicesOptions.expandWildcardsClosed() == false) {
|
||||
expandWildcards = "none";
|
||||
} else {
|
||||
StringJoiner joiner = new StringJoiner(",");
|
||||
StringJoiner joiner = new StringJoiner(",");
|
||||
if (indicesOptions.expandWildcardsOpen()) {
|
||||
joiner.add("open");
|
||||
}
|
||||
|
@ -646,6 +741,34 @@ public final class Request {
|
|||
return this;
|
||||
}
|
||||
|
||||
Params withHuman(boolean human) {
|
||||
if (human) {
|
||||
putParam("human", Boolean.toString(human));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withLocal(boolean local) {
|
||||
if (local) {
|
||||
putParam("local", Boolean.toString(local));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withFlatSettings(boolean flatSettings) {
|
||||
if (flatSettings) {
|
||||
return putParam("flat_settings", Boolean.TRUE.toString());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withIncludeDefaults(boolean includeDefaults) {
|
||||
if (includeDefaults) {
|
||||
return putParam("include_defaults", Boolean.TRUE.toString());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Map<String, String> getParams() {
|
||||
return Collections.unmodifiableMap(params);
|
||||
}
|
||||
|
|
|
@ -54,6 +54,8 @@ import org.elasticsearch.common.xcontent.ContextParser;
|
|||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.rankeval.RankEvalRequest;
|
||||
import org.elasticsearch.index.rankeval.RankEvalResponse;
|
||||
import org.elasticsearch.plugins.spi.NamedXContentProvider;
|
||||
import org.elasticsearch.rest.BytesRestResponse;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
@ -467,6 +469,27 @@ public class RestHighLevelClient implements Closeable {
|
|||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a request using the Ranking Evaluation API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html">Ranking Evaluation API
|
||||
* on elastic.co</a>
|
||||
*/
|
||||
public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a request using the Ranking Evaluation API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html">Ranking Evaluation API
|
||||
* on elastic.co</a>
|
||||
*/
|
||||
public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener<RankEvalResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, listener, emptySet(),
|
||||
headers);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
@ -144,7 +145,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
|
||||
StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
|
||||
Response response = client().performRequest("PUT", "/index/type/id", Collections.singletonMap("refresh", "wait_for"), stringEntity);
|
||||
Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"),
|
||||
stringEntity);
|
||||
assertEquals(201, response.getStatusLine().getStatusCode());
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id");
|
||||
|
@ -172,7 +174,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
|
||||
StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
|
||||
Response response = client().performRequest("PUT", "/index/type/id", Collections.singletonMap("refresh", "wait_for"), stringEntity);
|
||||
Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"),
|
||||
stringEntity);
|
||||
assertEquals(201, response.getStatusLine().getStatusCode());
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id").version(2);
|
||||
|
@ -267,12 +270,13 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
String document = "{\"field\":\"value1\"}";
|
||||
StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
|
||||
Response r = client().performRequest("PUT", "/index/type/id1", Collections.singletonMap("refresh", "true"), stringEntity);
|
||||
Response r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id1", Collections.singletonMap("refresh", "true"),
|
||||
stringEntity);
|
||||
assertEquals(201, r.getStatusLine().getStatusCode());
|
||||
|
||||
document = "{\"field\":\"value2\"}";
|
||||
stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
|
||||
r = client().performRequest("PUT", "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity);
|
||||
r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity);
|
||||
assertEquals(201, r.getStatusLine().getStatusCode());
|
||||
|
||||
{
|
||||
|
|
|
@ -79,7 +79,6 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
private CustomRestClient restHighLevelClient;
|
||||
|
||||
@Before
|
||||
@SuppressWarnings("unchecked")
|
||||
public void initClients() throws IOException {
|
||||
if (restHighLevelClient == null) {
|
||||
final RestClient restClient = mock(RestClient.class);
|
||||
|
|
|
@ -19,35 +19,99 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.action.admin.indices.alias.Alias;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
import static org.hamcrest.CoreMatchers.hasItem;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testIndicesExists() throws IOException {
|
||||
// Index present
|
||||
{
|
||||
String indexName = "test_index_exists_index_present";
|
||||
createIndex(indexName, Settings.EMPTY);
|
||||
|
||||
GetIndexRequest request = new GetIndexRequest();
|
||||
request.indices(indexName);
|
||||
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertTrue(response);
|
||||
}
|
||||
|
||||
// Index doesn't exist
|
||||
{
|
||||
String indexName = "non_existent_index";
|
||||
|
||||
GetIndexRequest request = new GetIndexRequest();
|
||||
request.indices(indexName);
|
||||
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertFalse(response);
|
||||
}
|
||||
|
||||
// One index exists, one doesn't
|
||||
{
|
||||
String existingIndex = "apples";
|
||||
createIndex(existingIndex, Settings.EMPTY);
|
||||
|
||||
String nonExistentIndex = "oranges";
|
||||
|
||||
GetIndexRequest request = new GetIndexRequest();
|
||||
request.indices(existingIndex, nonExistentIndex);
|
||||
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertFalse(response);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public void testCreateIndex() throws IOException {
|
||||
{
|
||||
// Create index
|
||||
|
@ -57,7 +121,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
|
||||
|
||||
CreateIndexResponse createIndexResponse =
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
|
||||
assertTrue(indexExists(indexName));
|
||||
|
@ -85,37 +149,30 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
createIndexRequest.mapping("type_name", mappingBuilder);
|
||||
|
||||
CreateIndexResponse createIndexResponse =
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
|
||||
Map<String, Object> indexMetaData = getIndexMetadata(indexName);
|
||||
Map<String, Object> getIndexResponse = getAsMap(indexName);
|
||||
assertEquals("2", XContentMapValues.extractValue(indexName + ".settings.index.number_of_replicas", getIndexResponse));
|
||||
|
||||
Map<String, Object> settingsData = (Map) indexMetaData.get("settings");
|
||||
Map<String, Object> indexSettings = (Map) settingsData.get("index");
|
||||
assertEquals("2", indexSettings.get("number_of_replicas"));
|
||||
|
||||
Map<String, Object> aliasesData = (Map) indexMetaData.get("aliases");
|
||||
Map<String, Object> aliasData = (Map) aliasesData.get("alias_name");
|
||||
Map<String, Object> aliasData =
|
||||
(Map<String, Object>)XContentMapValues.extractValue(indexName + ".aliases.alias_name", getIndexResponse);
|
||||
assertNotNull(aliasData);
|
||||
assertEquals("1", aliasData.get("index_routing"));
|
||||
Map<String, Object> filter = (Map) aliasData.get("filter");
|
||||
Map<String, Object> term = (Map) filter.get("term");
|
||||
assertEquals(2016, term.get("year"));
|
||||
|
||||
Map<String, Object> mappingsData = (Map) indexMetaData.get("mappings");
|
||||
Map<String, Object> typeData = (Map) mappingsData.get("type_name");
|
||||
Map<String, Object> properties = (Map) typeData.get("properties");
|
||||
Map<String, Object> field = (Map) properties.get("field");
|
||||
|
||||
assertEquals("text", field.get("type"));
|
||||
assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.type_name.properties.field.type", getIndexResponse));
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public void testPutMapping() throws IOException {
|
||||
{
|
||||
// Add mappings to index
|
||||
String indexName = "mapping_index";
|
||||
createIndex(indexName);
|
||||
createIndex(indexName, Settings.EMPTY);
|
||||
|
||||
PutMappingRequest putMappingRequest = new PutMappingRequest(indexName);
|
||||
putMappingRequest.type("type_name");
|
||||
|
@ -126,16 +183,11 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
putMappingRequest.source(mappingBuilder);
|
||||
|
||||
PutMappingResponse putMappingResponse =
|
||||
execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
|
||||
execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
|
||||
Map<String, Object> indexMetaData = getIndexMetadata(indexName);
|
||||
Map<String, Object> mappingsData = (Map) indexMetaData.get("mappings");
|
||||
Map<String, Object> typeData = (Map) mappingsData.get("type_name");
|
||||
Map<String, Object> properties = (Map) typeData.get("properties");
|
||||
Map<String, Object> field = (Map) properties.get("field");
|
||||
|
||||
assertEquals("text", field.get("type"));
|
||||
Map<String, Object> getIndexResponse = getAsMap(indexName);
|
||||
assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.type_name.properties.field.type", getIndexResponse));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -143,11 +195,11 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
{
|
||||
// Delete index if exists
|
||||
String indexName = "test_index";
|
||||
createIndex(indexName);
|
||||
createIndex(indexName, Settings.EMPTY);
|
||||
|
||||
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);
|
||||
DeleteIndexResponse deleteIndexResponse =
|
||||
execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
|
||||
execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
|
||||
assertTrue(deleteIndexResponse.isAcknowledged());
|
||||
|
||||
assertFalse(indexExists(indexName));
|
||||
|
@ -160,16 +212,108 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex);
|
||||
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync));
|
||||
() -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testUpdateAliases() throws IOException {
|
||||
String index = "index";
|
||||
String alias = "alias";
|
||||
|
||||
createIndex(index, Settings.EMPTY);
|
||||
assertThat(aliasExists(index, alias), equalTo(false));
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
|
||||
IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest();
|
||||
AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(index).aliases(alias);
|
||||
addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}");
|
||||
aliasesAddRequest.addAliasAction(addAction);
|
||||
IndicesAliasesResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesAddResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(true));
|
||||
assertThat(aliasExists(index, alias), equalTo(true));
|
||||
Map<String, Object> getAlias = getAlias(index, alias);
|
||||
assertThat(getAlias.get("index_routing"), equalTo("routing"));
|
||||
assertThat(getAlias.get("search_routing"), equalTo("search_routing"));
|
||||
Map<String, Object> filter = (Map<String, Object>) getAlias.get("filter");
|
||||
Map<String, Object> term = (Map<String, Object>) filter.get("term");
|
||||
assertEquals(2016, term.get("year"));
|
||||
|
||||
String alias2 = "alias2";
|
||||
IndicesAliasesRequest aliasesAddRemoveRequest = new IndicesAliasesRequest();
|
||||
addAction = new AliasActions(AliasActions.Type.ADD).indices(index).alias(alias2);
|
||||
aliasesAddRemoveRequest.addAliasAction(addAction);
|
||||
AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index(index).alias(alias);
|
||||
aliasesAddRemoveRequest.addAliasAction(removeAction);
|
||||
IndicesAliasesResponse aliasesAddRemoveResponse = execute(aliasesAddRemoveRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesAddRemoveResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
assertThat(aliasExists(alias2), equalTo(true));
|
||||
assertThat(aliasExists(index, alias), equalTo(false));
|
||||
assertThat(aliasExists(index, alias2), equalTo(true));
|
||||
|
||||
IndicesAliasesRequest aliasesRemoveIndexRequest = new IndicesAliasesRequest();
|
||||
AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index(index);
|
||||
aliasesRemoveIndexRequest.addAliasAction(removeIndexAction);
|
||||
IndicesAliasesResponse aliasesRemoveIndexResponse = execute(aliasesRemoveIndexRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesRemoveIndexResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
assertThat(aliasExists(alias2), equalTo(false));
|
||||
assertThat(aliasExists(index, alias), equalTo(false));
|
||||
assertThat(aliasExists(index, alias2), equalTo(false));
|
||||
assertThat(indexExists(index), equalTo(false));
|
||||
}
|
||||
|
||||
public void testAliasesNonExistentIndex() throws IOException {
|
||||
String index = "index";
|
||||
String alias = "alias";
|
||||
String nonExistentIndex = "non_existent_index";
|
||||
|
||||
IndicesAliasesRequest nonExistentIndexRequest = new IndicesAliasesRequest();
|
||||
nonExistentIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias));
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest,
|
||||
highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex));
|
||||
|
||||
createIndex(index, Settings.EMPTY);
|
||||
IndicesAliasesRequest mixedRequest = new IndicesAliasesRequest();
|
||||
mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).indices(index).aliases(alias));
|
||||
mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).indices(nonExistentIndex).alias(alias));
|
||||
exception = expectThrows(ElasticsearchStatusException.class,
|
||||
() -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex));
|
||||
assertThat(exception.getMetadata("es.index"), not(hasItem(index)));
|
||||
assertThat(aliasExists(index, alias), equalTo(false));
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
|
||||
IndicesAliasesRequest removeIndexRequest = new IndicesAliasesRequest();
|
||||
removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias));
|
||||
removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE_INDEX).indices(nonExistentIndex));
|
||||
exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex));
|
||||
assertThat(exception.getMetadata("es.index"), not(hasItem(index)));
|
||||
assertThat(aliasExists(index, alias), equalTo(false));
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
}
|
||||
|
||||
public void testOpenExistingIndex() throws IOException {
|
||||
String index = "index";
|
||||
createIndex(index);
|
||||
createIndex(index, Settings.EMPTY);
|
||||
closeIndex(index);
|
||||
ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
|
||||
ResponseException exception = expectThrows(ResponseException.class,
|
||||
() -> client().performRequest(HttpGet.METHOD_NAME, index + "/_search"));
|
||||
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
|
||||
assertThat(exception.getMessage().contains(index), equalTo(true));
|
||||
|
||||
|
@ -178,7 +322,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
highLevelClient().indices()::openAsync);
|
||||
assertTrue(openIndexResponse.isAcknowledged());
|
||||
|
||||
Response response = client().performRequest("GET", index + "/_search");
|
||||
Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_search");
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
|
||||
}
|
||||
|
||||
|
@ -206,8 +350,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
public void testCloseExistingIndex() throws IOException {
|
||||
String index = "index";
|
||||
createIndex(index);
|
||||
Response response = client().performRequest("GET", index + "/_search");
|
||||
createIndex(index, Settings.EMPTY);
|
||||
Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_search");
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
|
||||
|
||||
CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index);
|
||||
|
@ -215,7 +359,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
highLevelClient().indices()::closeAsync);
|
||||
assertTrue(closeIndexResponse.isAcknowledged());
|
||||
|
||||
ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
|
||||
ResponseException exception = expectThrows(ResponseException.class,
|
||||
() -> client().performRequest(HttpGet.METHOD_NAME, index + "/_search"));
|
||||
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
|
||||
assertThat(exception.getMessage().contains(index), equalTo(true));
|
||||
}
|
||||
|
@ -230,32 +375,65 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
|
||||
private static void createIndex(String index) throws IOException {
|
||||
Response response = client().performRequest("PUT", index);
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
|
||||
}
|
||||
public void testExistsAlias() throws IOException {
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias");
|
||||
assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
|
||||
private static boolean indexExists(String index) throws IOException {
|
||||
Response response = client().performRequest("HEAD", index);
|
||||
return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode();
|
||||
}
|
||||
createIndex("index", Settings.EMPTY);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/_alias/alias");
|
||||
assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
|
||||
private static void closeIndex(String index) throws IOException {
|
||||
Response response = client().performRequest("POST", index + "/_close");
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
|
||||
GetAliasesRequest getAliasesRequest2 = new GetAliasesRequest();
|
||||
getAliasesRequest2.aliases("alias");
|
||||
getAliasesRequest2.indices("index");
|
||||
assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
getAliasesRequest2.indices("does_not_exist");
|
||||
assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private Map<String, Object> getIndexMetadata(String index) throws IOException {
|
||||
Response response = client().performRequest("GET", index);
|
||||
public void testShrink() throws IOException {
|
||||
Map<String, Object> nodes = getAsMap("_nodes");
|
||||
String firstNode = ((Map<String, Object>) nodes.get("nodes")).keySet().iterator().next();
|
||||
createIndex("source", Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build());
|
||||
updateIndexSettings("source", Settings.builder().put("index.routing.allocation.require._name", firstNode)
|
||||
.put("index.blocks.write", true));
|
||||
|
||||
XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
|
||||
Map<String, Object> responseEntity = XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(),
|
||||
false);
|
||||
|
||||
Map<String, Object> indexMetaData = (Map) responseEntity.get(index);
|
||||
assertNotNull(indexMetaData);
|
||||
|
||||
return indexMetaData;
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SHRINK);
|
||||
Settings targetSettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build();
|
||||
resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias")));
|
||||
ResizeResponse resizeResponse = highLevelClient().indices().shrink(resizeRequest);
|
||||
assertTrue(resizeResponse.isAcknowledged());
|
||||
assertTrue(resizeResponse.isShardsAcknowledged());
|
||||
Map<String, Object> getIndexResponse = getAsMap("target");
|
||||
Map<String, Object> indexSettings = (Map<String, Object>)XContentMapValues.extractValue("target.settings.index", getIndexResponse);
|
||||
assertNotNull(indexSettings);
|
||||
assertEquals("2", indexSettings.get("number_of_shards"));
|
||||
assertEquals("0", indexSettings.get("number_of_replicas"));
|
||||
Map<String, Object> aliasData = (Map<String, Object>)XContentMapValues.extractValue("target.aliases.alias", getIndexResponse);
|
||||
assertNotNull(aliasData);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testSplit() throws IOException {
|
||||
createIndex("source", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)
|
||||
.put("index.number_of_routing_shards", 4).build());
|
||||
updateIndexSettings("source", Settings.builder().put("index.blocks.write", true));
|
||||
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SPLIT);
|
||||
Settings targetSettings = Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build();
|
||||
resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias")));
|
||||
ResizeResponse resizeResponse = highLevelClient().indices().split(resizeRequest);
|
||||
assertTrue(resizeResponse.isAcknowledged());
|
||||
assertTrue(resizeResponse.isShardsAcknowledged());
|
||||
Map<String, Object> getIndexResponse = getAsMap("target");
|
||||
Map<String, Object> indexSettings = (Map<String, Object>)XContentMapValues.extractValue("target.settings.index", getIndexResponse);
|
||||
assertNotNull(indexSettings);
|
||||
assertEquals("4", indexSettings.get("number_of_shards"));
|
||||
assertEquals("0", indexSettings.get("number_of_replicas"));
|
||||
Map<String, Object> aliasData = (Map<String, Object>)XContentMapValues.extractValue("target.aliases.alias", getIndexResponse);
|
||||
assertNotNull(aliasData);
|
||||
}
|
||||
}
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.elasticsearch.action.main.MainResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -34,7 +35,7 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
|
|||
public void testInfo() throws IOException {
|
||||
MainResponse info = highLevelClient().info();
|
||||
// compare with what the low level client outputs
|
||||
Map<String, Object> infoAsMap = entityAsMap(adminClient().performRequest("GET", "/"));
|
||||
Map<String, Object> infoAsMap = entityAsMap(adminClient().performRequest(HttpGet.METHOD_NAME, "/"));
|
||||
assertEquals(infoAsMap.get("cluster_name"), info.getClusterName().value());
|
||||
assertEquals(infoAsMap.get("cluster_uuid"), info.getClusterUuid());
|
||||
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.elasticsearch.index.query.MatchAllQueryBuilder;
|
||||
import org.elasticsearch.index.rankeval.EvalQueryQuality;
|
||||
import org.elasticsearch.index.rankeval.PrecisionAtK;
|
||||
import org.elasticsearch.index.rankeval.RankEvalRequest;
|
||||
import org.elasticsearch.index.rankeval.RankEvalResponse;
|
||||
import org.elasticsearch.index.rankeval.RankEvalSpec;
|
||||
import org.elasticsearch.index.rankeval.RatedDocument;
|
||||
import org.elasticsearch.index.rankeval.RatedRequest;
|
||||
import org.elasticsearch.index.rankeval.RatedSearchHit;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments;
|
||||
|
||||
public class RankEvalIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@Before
|
||||
public void indexDocuments() throws IOException {
|
||||
StringEntity doc = new StringEntity("{\"text\":\"berlin\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/doc/1", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"text\":\"amsterdam\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/doc/2", Collections.emptyMap(), doc);
|
||||
client().performRequest("PUT", "/index/doc/3", Collections.emptyMap(), doc);
|
||||
client().performRequest("PUT", "/index/doc/4", Collections.emptyMap(), doc);
|
||||
client().performRequest("PUT", "/index/doc/5", Collections.emptyMap(), doc);
|
||||
client().performRequest("PUT", "/index/doc/6", Collections.emptyMap(), doc);
|
||||
client().performRequest("POST", "/index/_refresh");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test cases retrieves all six documents indexed above and checks the Prec@10
|
||||
* calculation where all unlabeled documents are treated as not relevant.
|
||||
*/
|
||||
public void testRankEvalRequest() throws IOException {
|
||||
SearchSourceBuilder testQuery = new SearchSourceBuilder();
|
||||
testQuery.query(new MatchAllQueryBuilder());
|
||||
RatedRequest amsterdamRequest = new RatedRequest("amsterdam_query", createRelevant("index" , "2", "3", "4", "5"), testQuery);
|
||||
RatedRequest berlinRequest = new RatedRequest("berlin_query", createRelevant("index", "1"), testQuery);
|
||||
List<RatedRequest> specifications = new ArrayList<>();
|
||||
specifications.add(amsterdamRequest);
|
||||
specifications.add(berlinRequest);
|
||||
PrecisionAtK metric = new PrecisionAtK(1, false, 10);
|
||||
RankEvalSpec spec = new RankEvalSpec(specifications, metric);
|
||||
spec.addIndices(Collections.singletonList("index"));
|
||||
|
||||
RankEvalResponse response = execute(new RankEvalRequest(spec), highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
|
||||
// the expected Prec@ for the first query is 4/6 and the expected Prec@ for the second is 1/6, divided by 2 to get the average
|
||||
double expectedPrecision = (1.0 / 6.0 + 4.0 / 6.0) / 2.0;
|
||||
assertEquals(expectedPrecision, response.getEvaluationResult(), Double.MIN_VALUE);
|
||||
Set<Entry<String, EvalQueryQuality>> entrySet = response.getPartialResults().entrySet();
|
||||
assertEquals(2, entrySet.size());
|
||||
for (Entry<String, EvalQueryQuality> entry : entrySet) {
|
||||
EvalQueryQuality quality = entry.getValue();
|
||||
if (entry.getKey() == "amsterdam_query") {
|
||||
assertEquals(2, filterUnknownDocuments(quality.getHitsAndRatings()).size());
|
||||
List<RatedSearchHit> hitsAndRatings = quality.getHitsAndRatings();
|
||||
assertEquals(6, hitsAndRatings.size());
|
||||
for (RatedSearchHit hit : hitsAndRatings) {
|
||||
String id = hit.getSearchHit().getId();
|
||||
if (id.equals("1") || id.equals("6")) {
|
||||
assertFalse(hit.getRating().isPresent());
|
||||
} else {
|
||||
assertEquals(1, hit.getRating().get().intValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (entry.getKey() == "berlin_query") {
|
||||
assertEquals(5, filterUnknownDocuments(quality.getHitsAndRatings()).size());
|
||||
List<RatedSearchHit> hitsAndRatings = quality.getHitsAndRatings();
|
||||
assertEquals(6, hitsAndRatings.size());
|
||||
for (RatedSearchHit hit : hitsAndRatings) {
|
||||
String id = hit.getSearchHit().getId();
|
||||
if (id.equals("1")) {
|
||||
assertEquals(1, hit.getRating().get().intValue());
|
||||
} else {
|
||||
assertFalse(hit.getRating().isPresent());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static List<RatedDocument> createRelevant(String indexName, String... docs) {
|
||||
List<RatedDocument> relevant = new ArrayList<>();
|
||||
for (String doc : docs) {
|
||||
relevant.add(new RatedDocument(indexName, doc, 1));
|
||||
}
|
||||
return relevant;
|
||||
}
|
||||
}
|
|
@ -20,16 +20,27 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.client.methods.HttpDelete;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpHead;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkShardRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
|
@ -45,10 +56,12 @@ import org.elasticsearch.action.support.ActiveShardCount;
|
|||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
|
||||
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
import org.elasticsearch.action.support.replication.ReplicationRequest;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.common.CheckedBiConsumer;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -62,6 +75,11 @@ import org.elasticsearch.common.xcontent.XContentParser;
|
|||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.elasticsearch.index.rankeval.PrecisionAtK;
|
||||
import org.elasticsearch.index.rankeval.RankEvalRequest;
|
||||
import org.elasticsearch.index.rankeval.RankEvalSpec;
|
||||
import org.elasticsearch.index.rankeval.RatedRequest;
|
||||
import org.elasticsearch.index.rankeval.RestRankEvalAction;
|
||||
import org.elasticsearch.rest.action.search.RestSearchAction;
|
||||
import org.elasticsearch.search.Scroll;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
|
||||
|
@ -81,6 +99,8 @@ import java.io.InputStream;
|
|||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
@ -93,6 +113,10 @@ import java.util.function.Supplier;
|
|||
import static java.util.Collections.singletonMap;
|
||||
import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
|
||||
import static org.elasticsearch.client.Request.enforceSameContentType;
|
||||
import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases;
|
||||
import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest;
|
||||
import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings;
|
||||
import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction;
|
||||
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
|
@ -135,7 +159,7 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals("/", request.getEndpoint());
|
||||
assertEquals(0, request.getParameters().size());
|
||||
assertNull(request.getEntity());
|
||||
assertEquals("HEAD", request.getMethod());
|
||||
assertEquals(HttpHead.METHOD_NAME, request.getMethod());
|
||||
}
|
||||
|
||||
public void testInfo() {
|
||||
|
@ -143,11 +167,11 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals("/", request.getEndpoint());
|
||||
assertEquals(0, request.getParameters().size());
|
||||
assertNull(request.getEntity());
|
||||
assertEquals("GET", request.getMethod());
|
||||
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
|
||||
}
|
||||
|
||||
public void testGet() {
|
||||
getAndExistsTest(Request::get, "GET");
|
||||
getAndExistsTest(Request::get, HttpGet.METHOD_NAME);
|
||||
}
|
||||
|
||||
public void testMultiGet() throws IOException {
|
||||
|
@ -197,7 +221,7 @@ public class RequestTests extends ESTestCase {
|
|||
}
|
||||
|
||||
Request request = Request.multiGet(multiGetRequest);
|
||||
assertEquals("GET", request.getMethod());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_mget", request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertToXContentBody(multiGetRequest, request.getEntity());
|
||||
|
@ -232,12 +256,32 @@ public class RequestTests extends ESTestCase {
|
|||
Request request = Request.delete(deleteRequest);
|
||||
assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("DELETE", request.getMethod());
|
||||
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
public void testExists() {
|
||||
getAndExistsTest(Request::exists, "HEAD");
|
||||
getAndExistsTest(Request::exists, HttpHead.METHOD_NAME);
|
||||
}
|
||||
|
||||
public void testIndicesExist() {
|
||||
String[] indices = randomIndicesNames(1, 10);
|
||||
|
||||
GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indices);
|
||||
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams);
|
||||
setRandomLocal(getIndexRequest, expectedParams);
|
||||
setRandomFlatSettings(getIndexRequest, expectedParams);
|
||||
setRandomHumanReadable(getIndexRequest, expectedParams);
|
||||
setRandomIncludeDefaults(getIndexRequest, expectedParams);
|
||||
|
||||
final Request request = Request.indicesExist(getIndexRequest);
|
||||
|
||||
assertEquals(HttpHead.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/" + String.join(",", indices), request.getEndpoint());
|
||||
assertThat(expectedParams, equalTo(request.getParameters()));
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
|
||||
|
@ -299,33 +343,39 @@ public class RequestTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testCreateIndex() throws IOException {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
|
||||
|
||||
String indexName = "index-" + randomAlphaOfLengthBetween(2, 5);
|
||||
|
||||
createIndexRequest.index(indexName);
|
||||
CreateIndexRequest createIndexRequest = randomCreateIndexRequest();
|
||||
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
|
||||
setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
|
||||
setRandomMasterTimeout(createIndexRequest, expectedParams);
|
||||
setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
|
||||
|
||||
Request request = Request.createIndex(createIndexRequest);
|
||||
assertEquals("/" + indexName, request.getEndpoint());
|
||||
assertEquals("/" + createIndexRequest.index(), request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("PUT", request.getMethod());
|
||||
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
|
||||
assertToXContentBody(createIndexRequest, request.getEntity());
|
||||
}
|
||||
|
||||
public void testUpdateAliases() throws IOException {
|
||||
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
|
||||
AliasActions aliasAction = randomAliasAction();
|
||||
indicesAliasesRequest.addAliasAction(aliasAction);
|
||||
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
|
||||
setRandomMasterTimeout(indicesAliasesRequest, expectedParams);
|
||||
|
||||
Request request = Request.updateAliases(indicesAliasesRequest);
|
||||
assertEquals("/_aliases", request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertToXContentBody(indicesAliasesRequest, request.getEntity());
|
||||
}
|
||||
|
||||
public void testPutMapping() throws IOException {
|
||||
PutMappingRequest putMappingRequest = new PutMappingRequest();
|
||||
|
||||
int numIndices = randomIntBetween(0, 5);
|
||||
String[] indices = new String[numIndices];
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
|
||||
}
|
||||
String[] indices = randomIndicesNames(0, 5);
|
||||
putMappingRequest.indices(indices);
|
||||
|
||||
String type = randomAlphaOfLengthBetween(3, 10);
|
||||
|
@ -347,7 +397,7 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals(endpoint.toString(), request.getEndpoint());
|
||||
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("PUT", request.getMethod());
|
||||
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
|
||||
assertToXContentBody(putMappingRequest, request.getEntity());
|
||||
}
|
||||
|
||||
|
@ -364,7 +414,7 @@ public class RequestTests extends ESTestCase {
|
|||
Request request = Request.deleteIndex(deleteIndexRequest);
|
||||
assertEquals("/" + String.join(",", indices), request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("DELETE", request.getMethod());
|
||||
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
|
@ -383,7 +433,7 @@ public class RequestTests extends ESTestCase {
|
|||
StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
|
||||
assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
|
||||
assertThat(expectedParams, equalTo(request.getParameters()));
|
||||
assertThat(request.getMethod(), equalTo("POST"));
|
||||
assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
|
||||
assertThat(request.getEntity(), nullValue());
|
||||
}
|
||||
|
||||
|
@ -400,7 +450,7 @@ public class RequestTests extends ESTestCase {
|
|||
StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close");
|
||||
assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
|
||||
assertThat(expectedParams, equalTo(request.getParameters()));
|
||||
assertThat(request.getMethod(), equalTo("POST"));
|
||||
assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
|
||||
assertThat(request.getEntity(), nullValue());
|
||||
}
|
||||
|
||||
|
@ -414,9 +464,9 @@ public class RequestTests extends ESTestCase {
|
|||
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
|
||||
String method = "POST";
|
||||
String method = HttpPost.METHOD_NAME;
|
||||
if (id != null) {
|
||||
method = "PUT";
|
||||
method = HttpPut.METHOD_NAME;
|
||||
if (randomBoolean()) {
|
||||
indexRequest.opType(DocWriteRequest.OpType.CREATE);
|
||||
}
|
||||
|
@ -551,7 +601,7 @@ public class RequestTests extends ESTestCase {
|
|||
Request request = Request.update(updateRequest);
|
||||
assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("POST", request.getMethod());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
|
||||
HttpEntity entity = request.getEntity();
|
||||
assertTrue(entity instanceof ByteArrayEntity);
|
||||
|
@ -665,7 +715,7 @@ public class RequestTests extends ESTestCase {
|
|||
Request request = Request.bulk(bulkRequest);
|
||||
assertEquals("/_bulk", request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertEquals("POST", request.getMethod());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
|
||||
byte[] content = new byte[(int) request.getEntity().getContentLength()];
|
||||
try (InputStream inputStream = request.getEntity().getContent()) {
|
||||
|
@ -780,6 +830,14 @@ public class RequestTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testSearchNullSource() throws IOException {
|
||||
SearchRequest searchRequest = new SearchRequest();
|
||||
Request request = Request.search(searchRequest);
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_search", request.getEndpoint());
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
public void testSearch() throws Exception {
|
||||
String[] indices = randomIndicesNames(0, 5);
|
||||
SearchRequest searchRequest = new SearchRequest(indices);
|
||||
|
@ -809,6 +867,10 @@ public class RequestTests extends ESTestCase {
|
|||
searchRequest.requestCache(randomBoolean());
|
||||
expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
searchRequest.allowPartialSearchResults(randomBoolean());
|
||||
expectedParams.put("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE));
|
||||
}
|
||||
|
@ -876,6 +938,7 @@ public class RequestTests extends ESTestCase {
|
|||
endpoint.add(type);
|
||||
}
|
||||
endpoint.add("_search");
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals(endpoint.toString(), request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertToXContentBody(searchSourceBuilder, request.getEntity());
|
||||
|
@ -914,6 +977,7 @@ public class RequestTests extends ESTestCase {
|
|||
|
||||
Request request = Request.multiSearch(multiSearchRequest);
|
||||
assertEquals("/_msearch", request.getEndpoint());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
|
||||
List<SearchRequest> requests = new ArrayList<>();
|
||||
|
@ -937,7 +1001,7 @@ public class RequestTests extends ESTestCase {
|
|||
searchScrollRequest.scroll(randomPositiveTimeValue());
|
||||
}
|
||||
Request request = Request.searchScroll(searchScrollRequest);
|
||||
assertEquals("GET", request.getMethod());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_search/scroll", request.getEndpoint());
|
||||
assertEquals(0, request.getParameters().size());
|
||||
assertToXContentBody(searchScrollRequest, request.getEntity());
|
||||
|
@ -951,13 +1015,125 @@ public class RequestTests extends ESTestCase {
|
|||
clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10));
|
||||
}
|
||||
Request request = Request.clearScroll(clearScrollRequest);
|
||||
assertEquals("DELETE", request.getMethod());
|
||||
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_search/scroll", request.getEndpoint());
|
||||
assertEquals(0, request.getParameters().size());
|
||||
assertToXContentBody(clearScrollRequest, request.getEntity());
|
||||
assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
|
||||
}
|
||||
|
||||
public void testExistsAlias() {
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
|
||||
String[] indices = randomIndicesNames(0, 5);
|
||||
getAliasesRequest.indices(indices);
|
||||
//the HEAD endpoint requires at least an alias or an index
|
||||
String[] aliases = randomIndicesNames(indices.length == 0 ? 1 : 0, 5);
|
||||
getAliasesRequest.aliases(aliases);
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomLocal(getAliasesRequest, expectedParams);
|
||||
setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams);
|
||||
|
||||
Request request = Request.existsAlias(getAliasesRequest);
|
||||
StringJoiner expectedEndpoint = new StringJoiner("/", "/", "");
|
||||
String index = String.join(",", indices);
|
||||
if (Strings.hasLength(index)) {
|
||||
expectedEndpoint.add(index);
|
||||
}
|
||||
expectedEndpoint.add("_alias");
|
||||
String alias = String.join(",", aliases);
|
||||
if (Strings.hasLength(alias)) {
|
||||
expectedEndpoint.add(alias);
|
||||
}
|
||||
assertEquals(HttpHead.METHOD_NAME, request.getMethod());
|
||||
assertEquals(expectedEndpoint.toString(), request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
public void testExistsAliasNoAliasNoIndex() {
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
|
||||
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest));
|
||||
assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testRankEval() throws Exception {
|
||||
RankEvalSpec spec = new RankEvalSpec(
|
||||
Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())),
|
||||
new PrecisionAtK());
|
||||
String[] indices = randomIndicesNames(0, 5);
|
||||
spec.addIndices(Arrays.asList(indices));
|
||||
RankEvalRequest rankEvalRequest = new RankEvalRequest(spec);
|
||||
|
||||
Request request = Request.rankEval(rankEvalRequest);
|
||||
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||
String index = String.join(",", indices);
|
||||
if (Strings.hasLength(index)) {
|
||||
endpoint.add(index);
|
||||
}
|
||||
endpoint.add(RestRankEvalAction.ENDPOINT);
|
||||
assertEquals(endpoint.toString(), request.getEndpoint());
|
||||
assertEquals(Collections.emptyMap(), request.getParameters());
|
||||
assertToXContentBody(spec, request.getEntity());
|
||||
}
|
||||
|
||||
public void testSplit() throws IOException {
|
||||
resizeTest(ResizeType.SPLIT, Request::split);
|
||||
}
|
||||
|
||||
public void testSplitWrongResizeType() {
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SHRINK);
|
||||
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.split(resizeRequest));
|
||||
assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testShrinkWrongResizeType() {
|
||||
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
|
||||
resizeRequest.setResizeType(ResizeType.SPLIT);
|
||||
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.shrink(resizeRequest));
|
||||
assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage());
|
||||
}
|
||||
|
||||
public void testShrink() throws IOException {
|
||||
resizeTest(ResizeType.SHRINK, Request::shrink);
|
||||
}
|
||||
|
||||
private static void resizeTest(ResizeType resizeType, CheckedFunction<ResizeRequest, Request, IOException> function)
|
||||
throws IOException {
|
||||
String[] indices = randomIndicesNames(2, 2);
|
||||
ResizeRequest resizeRequest = new ResizeRequest(indices[0], indices[1]);
|
||||
resizeRequest.setResizeType(resizeType);
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomMasterTimeout(resizeRequest, expectedParams);
|
||||
setRandomTimeout(resizeRequest::timeout, resizeRequest.timeout(), expectedParams);
|
||||
|
||||
if (randomBoolean()) {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(randomAlphaOfLengthBetween(3, 10));
|
||||
if (randomBoolean()) {
|
||||
createIndexRequest.settings(randomIndexSettings());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
randomAliases(createIndexRequest);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
|
||||
}
|
||||
resizeRequest.setTargetIndex(createIndexRequest);
|
||||
} else {
|
||||
if (randomBoolean()) {
|
||||
setRandomWaitForActiveShards(resizeRequest::setWaitForActiveShards, expectedParams);
|
||||
}
|
||||
}
|
||||
|
||||
Request request = function.apply(resizeRequest);
|
||||
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
|
||||
String expectedEndpoint = "/" + resizeRequest.getSourceIndex() + "/_" + resizeType.name().toLowerCase(Locale.ROOT) + "/"
|
||||
+ resizeRequest.getTargetIndexRequest().index();
|
||||
assertEquals(expectedEndpoint, request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertToXContentBody(resizeRequest, request.getEntity());
|
||||
}
|
||||
|
||||
private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
|
||||
BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
|
||||
assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
|
||||
|
@ -992,14 +1168,25 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals("1", requestParams.values().iterator().next());
|
||||
}
|
||||
|
||||
public void testBuildEndpoint() {
|
||||
assertEquals("/", Request.buildEndpoint());
|
||||
assertEquals("/", Request.buildEndpoint(Strings.EMPTY_ARRAY));
|
||||
assertEquals("/", Request.buildEndpoint(""));
|
||||
assertEquals("/a/b", Request.buildEndpoint("a", "b"));
|
||||
assertEquals("/a/b/_create", Request.buildEndpoint("a", "b", "_create"));
|
||||
assertEquals("/a/b/c/_create", Request.buildEndpoint("a", "b", "c", "_create"));
|
||||
assertEquals("/a/_create", Request.buildEndpoint("a", null, null, "_create"));
|
||||
}
|
||||
|
||||
public void testEndpoint() {
|
||||
assertEquals("/", Request.endpoint());
|
||||
assertEquals("/", Request.endpoint(Strings.EMPTY_ARRAY));
|
||||
assertEquals("/", Request.endpoint(""));
|
||||
assertEquals("/a/b", Request.endpoint("a", "b"));
|
||||
assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create"));
|
||||
assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create"));
|
||||
assertEquals("/a/_create", Request.endpoint("a", null, null, "_create"));
|
||||
assertEquals("/index/type/id", Request.endpoint("index", "type", "id"));
|
||||
assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint"));
|
||||
assertEquals("/index1,index2", Request.endpoint(new String[]{"index1", "index2"}));
|
||||
assertEquals("/index1,index2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, "_endpoint"));
|
||||
assertEquals("/index1,index2/type1,type2/_endpoint", Request.endpoint(new String[]{"index1", "index2"},
|
||||
new String[]{"type1", "type2"}, "_endpoint"));
|
||||
assertEquals("/index1,index2/_endpoint/suffix1,suffix2", Request.endpoint(new String[]{"index1", "index2"},
|
||||
"_endpoint", new String[]{"suffix1", "suffix2"}));
|
||||
}
|
||||
|
||||
public void testCreateContentType() {
|
||||
|
@ -1082,6 +1269,46 @@ public class RequestTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private static void setRandomIncludeDefaults(GetIndexRequest request, Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
boolean includeDefaults = randomBoolean();
|
||||
request.includeDefaults(includeDefaults);
|
||||
if (includeDefaults) {
|
||||
expectedParams.put("include_defaults", String.valueOf(includeDefaults));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void setRandomHumanReadable(GetIndexRequest request, Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
boolean humanReadable = randomBoolean();
|
||||
request.humanReadable(humanReadable);
|
||||
if (humanReadable) {
|
||||
expectedParams.put("human", String.valueOf(humanReadable));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void setRandomFlatSettings(GetIndexRequest request, Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
boolean flatSettings = randomBoolean();
|
||||
request.flatSettings(flatSettings);
|
||||
if (flatSettings) {
|
||||
expectedParams.put("flat_settings", String.valueOf(flatSettings));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void setRandomLocal(MasterNodeReadRequest<?> request, Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
boolean local = randomBoolean();
|
||||
request.local(local);
|
||||
if (local) {
|
||||
expectedParams.put("local", String.valueOf(local));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
String timeout = randomTimeValue();
|
||||
|
|
|
@ -44,7 +44,7 @@ public class RestHighLevelClientExtTests extends ESTestCase {
|
|||
private RestHighLevelClient restHighLevelClient;
|
||||
|
||||
@Before
|
||||
public void initClient() throws IOException {
|
||||
public void initClient() {
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
restHighLevelClient = new RestHighLevelClientExt(restClient);
|
||||
}
|
||||
|
|
|
@ -28,6 +28,10 @@ import org.apache.http.HttpResponse;
|
|||
import org.apache.http.ProtocolVersion;
|
||||
import org.apache.http.RequestLine;
|
||||
import org.apache.http.StatusLine;
|
||||
import org.apache.http.client.methods.HttpDelete;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpHead;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
|
@ -60,6 +64,7 @@ import org.elasticsearch.common.xcontent.smile.SmileXContent;
|
|||
import org.elasticsearch.index.rankeval.DiscountedCumulativeGain;
|
||||
import org.elasticsearch.index.rankeval.EvaluationMetric;
|
||||
import org.elasticsearch.index.rankeval.MeanReciprocalRank;
|
||||
import org.elasticsearch.index.rankeval.MetricDetail;
|
||||
import org.elasticsearch.index.rankeval.PrecisionAtK;
|
||||
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
@ -104,7 +109,7 @@ import static org.mockito.Mockito.when;
|
|||
public class RestHighLevelClientTests extends ESTestCase {
|
||||
|
||||
private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1);
|
||||
private static final RequestLine REQUEST_LINE = new BasicRequestLine("GET", "/", HTTP_PROTOCOL);
|
||||
private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL);
|
||||
|
||||
private RestClient restClient;
|
||||
private RestHighLevelClient restHighLevelClient;
|
||||
|
@ -131,7 +136,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
|
||||
anyObject(), anyVararg())).thenReturn(response);
|
||||
assertTrue(restHighLevelClient.ping(headers));
|
||||
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
|
||||
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -142,7 +147,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
|
||||
anyObject(), anyVararg())).thenReturn(response);
|
||||
assertFalse(restHighLevelClient.ping(headers));
|
||||
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
|
||||
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -151,7 +156,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
|
||||
anyObject(), anyVararg())).thenThrow(new SocketTimeoutException());
|
||||
expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers));
|
||||
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
|
||||
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -162,7 +167,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
mockResponse(testInfo);
|
||||
MainResponse receivedInfo = restHighLevelClient.info(headers);
|
||||
assertEquals(testInfo, receivedInfo);
|
||||
verify(restClient).performRequest(eq("GET"), eq("/"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpGet.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
|
||||
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -179,7 +184,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
assertEquals(5, searchResponse.getTotalShards());
|
||||
assertEquals(5, searchResponse.getSuccessfulShards());
|
||||
assertEquals(100, searchResponse.getTook().getMillis());
|
||||
verify(restClient).performRequest(eq("GET"), eq("/_search/scroll"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpPost.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
|
||||
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -192,7 +197,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers);
|
||||
assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded());
|
||||
assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed());
|
||||
verify(restClient).performRequest(eq("DELETE"), eq("/_search/scroll"), eq(Collections.emptyMap()),
|
||||
verify(restClient).performRequest(eq(HttpDelete.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
|
||||
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
|
@ -331,7 +336,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnSuccess() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
RestStatus restStatus = randomFrom(RestStatus.values());
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
|
||||
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
|
||||
|
@ -353,7 +358,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
RestStatus restStatus = randomFrom(RestStatus.values());
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
|
||||
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
|
||||
|
@ -371,7 +376,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
RestStatus restStatus = randomFrom(RestStatus.values());
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
|
||||
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
|
||||
|
@ -391,7 +396,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
RestStatus restStatus = randomFrom(RestStatus.values());
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
|
||||
httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
|
||||
|
@ -411,7 +416,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
RestStatus restStatus = randomFrom(RestStatus.values());
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
|
||||
httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
|
||||
|
@ -431,7 +436,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
|
||||
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
|
@ -445,7 +450,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
|
||||
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
|
@ -462,7 +467,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
|
||||
MainRequest mainRequest = new MainRequest();
|
||||
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
|
||||
new Request("GET", "/", Collections.emptyMap(), null);
|
||||
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
|
||||
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
|
||||
ContentType.APPLICATION_JSON));
|
||||
|
@ -652,7 +657,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
|
||||
public void testProvidedNamedXContents() {
|
||||
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
|
||||
assertEquals(5, namedXContents.size());
|
||||
assertEquals(7, namedXContents.size());
|
||||
Map<Class<?>, Integer> categories = new HashMap<>();
|
||||
List<String> names = new ArrayList<>();
|
||||
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
|
||||
|
@ -662,7 +667,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
categories.put(namedXContent.categoryClass, counter + 1);
|
||||
}
|
||||
}
|
||||
assertEquals(2, categories.size());
|
||||
assertEquals(3, categories.size());
|
||||
assertEquals(Integer.valueOf(2), categories.get(Aggregation.class));
|
||||
assertTrue(names.contains(ChildrenAggregationBuilder.NAME));
|
||||
assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME));
|
||||
|
@ -670,6 +675,9 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
assertTrue(names.contains(PrecisionAtK.NAME));
|
||||
assertTrue(names.contains(DiscountedCumulativeGain.NAME));
|
||||
assertTrue(names.contains(MeanReciprocalRank.NAME));
|
||||
assertEquals(Integer.valueOf(2), categories.get(MetricDetail.class));
|
||||
assertTrue(names.contains(PrecisionAtK.NAME));
|
||||
assertTrue(names.contains(MeanReciprocalRank.NAME));
|
||||
}
|
||||
|
||||
private static class TrackingActionListener implements ActionListener<Integer> {
|
||||
|
|
|
@ -20,10 +20,11 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.nio.entity.NStringEntity;
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.action.search.ClearScrollRequest;
|
||||
|
@ -35,9 +36,7 @@ import org.elasticsearch.action.search.SearchResponse;
|
|||
import org.elasticsearch.action.search.SearchScrollRequest;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.MatchAllQueryBuilder;
|
||||
import org.elasticsearch.index.query.MatchQueryBuilder;
|
||||
import org.elasticsearch.index.query.NestedQueryBuilder;
|
||||
import org.elasticsearch.index.query.ScriptQueryBuilder;
|
||||
import org.elasticsearch.index.query.TermsQueryBuilder;
|
||||
import org.elasticsearch.join.aggregations.Children;
|
||||
|
@ -66,6 +65,8 @@ import org.junit.Before;
|
|||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.hamcrest.Matchers.both;
|
||||
|
@ -83,30 +84,30 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
@Before
|
||||
public void indexDocuments() throws IOException {
|
||||
StringEntity doc1 = new StringEntity("{\"type\":\"type1\", \"num\":10, \"num2\":50}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/type/1", Collections.emptyMap(), doc1);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/type/1", Collections.emptyMap(), doc1);
|
||||
StringEntity doc2 = new StringEntity("{\"type\":\"type1\", \"num\":20, \"num2\":40}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/type/2", Collections.emptyMap(), doc2);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/type/2", Collections.emptyMap(), doc2);
|
||||
StringEntity doc3 = new StringEntity("{\"type\":\"type1\", \"num\":50, \"num2\":35}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/type/3", Collections.emptyMap(), doc3);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/type/3", Collections.emptyMap(), doc3);
|
||||
StringEntity doc4 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/type/4", Collections.emptyMap(), doc4);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/type/4", Collections.emptyMap(), doc4);
|
||||
StringEntity doc5 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index/type/5", Collections.emptyMap(), doc5);
|
||||
client().performRequest("POST", "/index/_refresh");
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/type/5", Collections.emptyMap(), doc5);
|
||||
client().performRequest(HttpPost.METHOD_NAME, "/index/_refresh");
|
||||
|
||||
StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index1/doc/1", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/1", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index1/doc/2", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/2", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index2/doc/3", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/3", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index2/doc/4", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/4", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index3/doc/5", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/5", Collections.emptyMap(), doc);
|
||||
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/index3/doc/6", Collections.emptyMap(), doc);
|
||||
client().performRequest("POST", "/index1,index2,index3/_refresh");
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/6", Collections.emptyMap(), doc);
|
||||
client().performRequest(HttpPost.METHOD_NAME, "/index1,index2,index3/_refresh");
|
||||
}
|
||||
|
||||
public void testSearchNoQuery() throws IOException {
|
||||
|
@ -316,7 +317,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
" }\n" +
|
||||
" }" +
|
||||
"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/" + indexName, Collections.emptyMap(), parentMapping);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/" + indexName, Collections.emptyMap(), parentMapping);
|
||||
StringEntity questionDoc = new StringEntity("{\n" +
|
||||
" \"body\": \"<p>I have Windows 2003 server and i bought a new Windows 2008 server...\",\n" +
|
||||
" \"title\": \"Whats the best way to file transfer my site from server to a newer one?\",\n" +
|
||||
|
@ -327,7 +328,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
" ],\n" +
|
||||
" \"qa_join_field\" : \"question\"\n" +
|
||||
"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/" + indexName + "/qa/1", Collections.emptyMap(), questionDoc);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/" + indexName + "/qa/1", Collections.emptyMap(), questionDoc);
|
||||
StringEntity answerDoc1 = new StringEntity("{\n" +
|
||||
" \"owner\": {\n" +
|
||||
" \"location\": \"Norfolk, United Kingdom\",\n" +
|
||||
|
@ -341,7 +342,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
" },\n" +
|
||||
" \"creation_date\": \"2009-05-04T13:45:37.030\"\n" +
|
||||
"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/" + indexName + "/qa/2", Collections.singletonMap("routing", "1"), answerDoc1);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/" + indexName + "/qa/2", Collections.singletonMap("routing", "1"), answerDoc1);
|
||||
StringEntity answerDoc2 = new StringEntity("{\n" +
|
||||
" \"owner\": {\n" +
|
||||
" \"location\": \"Norfolk, United Kingdom\",\n" +
|
||||
|
@ -355,8 +356,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
" },\n" +
|
||||
" \"creation_date\": \"2009-05-05T13:45:37.030\"\n" +
|
||||
"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "/" + indexName + "/qa/3", Collections.singletonMap("routing", "1"), answerDoc2);
|
||||
client().performRequest("POST", "/_refresh");
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/" + indexName + "/qa/3", Collections.singletonMap("routing", "1"), answerDoc2);
|
||||
client().performRequest(HttpPost.METHOD_NAME, "/_refresh");
|
||||
|
||||
TermsAggregationBuilder leafTermAgg = new TermsAggregationBuilder("top-names", ValueType.STRING)
|
||||
.field("owner.display_name.keyword").size(10);
|
||||
|
@ -432,14 +433,55 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testSearchWithWeirdScriptFields() throws Exception {
|
||||
HttpEntity entity = new NStringEntity("{ \"field\":\"value\"}", ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "test/type/1", Collections.emptyMap(), entity);
|
||||
client().performRequest("POST", "/test/_refresh");
|
||||
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("null")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
assertEquals(1, values.size());
|
||||
assertNull(values.get(0));
|
||||
}
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("new HashMap()")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
assertEquals(1, values.size());
|
||||
assertThat(values.get(0), instanceOf(Map.class));
|
||||
Map<?, ?> map = (Map<?, ?>) values.get(0);
|
||||
assertEquals(0, map.size());
|
||||
}
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("new String[]{}")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
assertEquals(1, values.size());
|
||||
assertThat(values.get(0), instanceOf(List.class));
|
||||
List<?> list = (List<?>) values.get(0);
|
||||
assertEquals(0, list.size());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSearchScroll() throws Exception {
|
||||
|
||||
for (int i = 0; i < 100; i++) {
|
||||
XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject();
|
||||
HttpEntity entity = new NStringEntity(builder.string(), ContentType.APPLICATION_JSON);
|
||||
client().performRequest("PUT", "test/type1/" + Integer.toString(i), Collections.emptyMap(), entity);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "test/type1/" + Integer.toString(i), Collections.emptyMap(), entity);
|
||||
}
|
||||
client().performRequest("POST", "/test/_refresh");
|
||||
client().performRequest(HttpPost.METHOD_NAME, "/test/_refresh");
|
||||
|
||||
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC);
|
||||
SearchRequest searchRequest = new SearchRequest("test").scroll(TimeValue.timeValueMinutes(2)).source(searchSourceBuilder);
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.elasticsearch.ElasticsearchException;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.bulk.BackoffPolicy;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
import org.elasticsearch.action.bulk.BulkProcessor;
|
||||
|
@ -59,13 +60,12 @@ import org.elasticsearch.rest.RestStatus;
|
|||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptType;
|
||||
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
|
||||
import org.elasticsearch.threadpool.Scheduler;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
|
@ -87,7 +87,7 @@ import static java.util.Collections.singletonMap;
|
|||
*/
|
||||
public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testIndex() throws IOException {
|
||||
public void testIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
|
@ -167,20 +167,6 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
// end::index-response
|
||||
|
||||
// tag::index-execute-async
|
||||
client.indexAsync(request, new ActionListener<IndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndexResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
// end::index-execute-async
|
||||
}
|
||||
{
|
||||
IndexRequest request = new IndexRequest("posts", "doc", "1");
|
||||
|
@ -240,9 +226,35 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
// end::index-optype
|
||||
}
|
||||
{
|
||||
IndexRequest request = new IndexRequest("posts", "doc", "async").source("field", "value");
|
||||
// tag::index-execute-listener
|
||||
ActionListener<IndexResponse> listener = new ActionListener<IndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndexResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::index-execute-async
|
||||
client.indexAsync(request, listener); // <1>
|
||||
// end::index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testUpdate() throws IOException {
|
||||
public void testUpdate() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0);
|
||||
|
@ -378,20 +390,6 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
// end::update-failure
|
||||
|
||||
// tag::update-execute-async
|
||||
client.updateAsync(request, new ActionListener<UpdateResponse>() {
|
||||
@Override
|
||||
public void onResponse(UpdateResponse updateResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
// end::update-execute-async
|
||||
}
|
||||
{
|
||||
//tag::update-docnotfound
|
||||
|
@ -497,9 +495,36 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
request.waitForActiveShards(ActiveShardCount.ALL); // <2>
|
||||
// end::update-request-active-shards
|
||||
}
|
||||
{
|
||||
UpdateRequest request = new UpdateRequest("posts", "doc", "async").doc("reason", "async update").docAsUpsert(true);
|
||||
|
||||
// tag::update-execute-listener
|
||||
ActionListener<UpdateResponse> listener = new ActionListener<UpdateResponse>() {
|
||||
@Override
|
||||
public void onResponse(UpdateResponse updateResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::update-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::update-execute-async
|
||||
client.updateAsync(request, listener); // <1>
|
||||
// end::update-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDelete() throws IOException {
|
||||
public void testDelete() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
|
@ -536,20 +561,6 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
// end::delete-response
|
||||
|
||||
// tag::delete-execute-async
|
||||
client.deleteAsync(request, new ActionListener<DeleteResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteResponse deleteResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
// end::delete-execute-async
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -601,9 +612,39 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
// end::delete-conflict
|
||||
}
|
||||
{
|
||||
IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value"));
|
||||
assertSame(indexResponse.status(), RestStatus.CREATED);
|
||||
|
||||
DeleteRequest request = new DeleteRequest("posts", "doc", "async");
|
||||
|
||||
// tag::delete-execute-listener
|
||||
ActionListener<DeleteResponse> listener = new ActionListener<DeleteResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteResponse deleteResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::delete-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::delete-execute-async
|
||||
client.deleteAsync(request, listener); // <1>
|
||||
// end::delete-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testBulk() throws IOException {
|
||||
public void testBulk() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
// tag::bulk-request
|
||||
|
@ -679,8 +720,8 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
request.waitForActiveShards(ActiveShardCount.ALL); // <2>
|
||||
// end::bulk-request-active-shards
|
||||
|
||||
// tag::bulk-execute-async
|
||||
client.bulkAsync(request, new ActionListener<BulkResponse>() {
|
||||
// tag::bulk-execute-listener
|
||||
ActionListener<BulkResponse> listener = new ActionListener<BulkResponse>() {
|
||||
@Override
|
||||
public void onResponse(BulkResponse bulkResponse) {
|
||||
// <1>
|
||||
|
@ -690,12 +731,22 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::bulk-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::bulk-execute-async
|
||||
client.bulkAsync(request, listener); // <1>
|
||||
// end::bulk-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGet() throws IOException {
|
||||
public void testGet() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
String mappings = "{\n" +
|
||||
|
@ -822,8 +873,9 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
{
|
||||
GetRequest request = new GetRequest("posts", "doc", "1");
|
||||
//tag::get-execute-async
|
||||
client.getAsync(request, new ActionListener<GetResponse>() {
|
||||
|
||||
// tag::get-execute-listener
|
||||
ActionListener<GetResponse> listener = new ActionListener<GetResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetResponse getResponse) {
|
||||
// <1>
|
||||
|
@ -833,8 +885,18 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::get-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
//tag::get-execute-async
|
||||
client.getAsync(request, listener); // <1>
|
||||
//end::get-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
{
|
||||
//tag::get-indexnotfound
|
||||
|
@ -862,7 +924,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testBulkProcessor() throws InterruptedException, IOException {
|
||||
public void testBulkProcessor() throws InterruptedException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
// tag::bulk-processor-init
|
||||
|
|
|
@ -21,28 +21,43 @@ package org.elasticsearch.client.documentation;
|
|||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.Alias;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Indices API documentation.
|
||||
|
@ -60,6 +75,71 @@ import java.io.IOException;
|
|||
*/
|
||||
public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testIndicesExist() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
// tag::indices-exists-request
|
||||
GetIndexRequest request = new GetIndexRequest();
|
||||
request.indices("twitter"); // <1>
|
||||
// end::indices-exists-request
|
||||
|
||||
IndicesOptions indicesOptions = IndicesOptions.strictExpand();
|
||||
// tag::indices-exists-request-optionals
|
||||
request.local(false); // <1>
|
||||
request.humanReadable(true); // <2>
|
||||
request.includeDefaults(false); // <3>
|
||||
request.flatSettings(false); // <4>
|
||||
request.indicesOptions(indicesOptions); // <5>
|
||||
// end::indices-exists-request-optionals
|
||||
|
||||
// tag::indices-exists-response
|
||||
boolean exists = client.indices().exists(request);
|
||||
// end::indices-exists-response
|
||||
assertTrue(exists);
|
||||
}
|
||||
}
|
||||
|
||||
public void testIndicesExistAsync() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
GetIndexRequest request = new GetIndexRequest();
|
||||
request.indices("twitter");
|
||||
|
||||
// tag::indices-exists-execute-listener
|
||||
ActionListener<Boolean> listener = new ActionListener<Boolean>() {
|
||||
@Override
|
||||
public void onResponse(Boolean exists) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::indices-exists-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::indices-exists-async
|
||||
client.indices().existsAsync(request, listener); // <1>
|
||||
// end::indices-exists-async
|
||||
}
|
||||
}
|
||||
public void testDeleteIndex() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
|
@ -120,8 +200,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
{
|
||||
DeleteIndexRequest request = new DeleteIndexRequest("posts");
|
||||
|
||||
// tag::delete-index-execute-async
|
||||
client.indices().deleteAsync(request, new ActionListener<DeleteIndexResponse>() {
|
||||
// tag::delete-index-execute-listener
|
||||
ActionListener<DeleteIndexResponse> listener = new ActionListener<DeleteIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteIndexResponse deleteIndexResponse) {
|
||||
// <1>
|
||||
|
@ -131,14 +211,18 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::delete-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::delete-index-execute-async
|
||||
client.indices().deleteAsync(request, listener); // <1>
|
||||
// end::delete-index-execute-async
|
||||
|
||||
assertBusy(() -> {
|
||||
// TODO Use Indices Exist API instead once it exists
|
||||
Response response = client.getLowLevelClient().performRequest("HEAD", "posts");
|
||||
assertTrue(RestStatus.NOT_FOUND.getStatus() == response.getStatusLine().getStatusCode());
|
||||
});
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -157,24 +241,78 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
);
|
||||
// end::create-index-request-settings
|
||||
|
||||
// tag::create-index-request-mappings
|
||||
request.mapping("tweet", // <1>
|
||||
"{\n" +
|
||||
" \"tweet\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"message\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}", // <2>
|
||||
XContentType.JSON);
|
||||
// end::create-index-request-mappings
|
||||
{
|
||||
// tag::create-index-request-mappings
|
||||
request.mapping("tweet", // <1>
|
||||
"{\n" +
|
||||
" \"tweet\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"message\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}", // <2>
|
||||
XContentType.JSON);
|
||||
// end::create-index-request-mappings
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
request = new CreateIndexRequest("twitter2");
|
||||
//tag::create-index-mappings-map
|
||||
Map<String, Object> jsonMap = new HashMap<>();
|
||||
Map<String, Object> message = new HashMap<>();
|
||||
message.put("type", "text");
|
||||
Map<String, Object> properties = new HashMap<>();
|
||||
properties.put("message", message);
|
||||
Map<String, Object> tweet = new HashMap<>();
|
||||
tweet.put("properties", properties);
|
||||
jsonMap.put("tweet", tweet);
|
||||
request.mapping("tweet", jsonMap); // <1>
|
||||
//end::create-index-mappings-map
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
{
|
||||
request = new CreateIndexRequest("twitter3");
|
||||
//tag::create-index-mappings-xcontent
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.startObject();
|
||||
{
|
||||
builder.startObject("tweet");
|
||||
{
|
||||
builder.startObject("properties");
|
||||
{
|
||||
builder.startObject("message");
|
||||
{
|
||||
builder.field("type", "text");
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
request.mapping("tweet", builder); // <1>
|
||||
//end::create-index-mappings-xcontent
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
{
|
||||
request = new CreateIndexRequest("twitter4");
|
||||
//tag::create-index-mappings-shortcut
|
||||
request.mapping("tweet", "message", "type=text"); // <1>
|
||||
//end::create-index-mappings-shortcut
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
request = new CreateIndexRequest("twitter5");
|
||||
// tag::create-index-request-aliases
|
||||
request.alias(
|
||||
new Alias("twitter_alias") // <1>
|
||||
);
|
||||
request.alias(new Alias("twitter_alias").filter(QueryBuilders.termQuery("user", "kimchy"))); // <1>
|
||||
// end::create-index-request-aliases
|
||||
|
||||
// tag::create-index-request-timeout
|
||||
|
@ -189,6 +327,30 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
request.waitForActiveShards(2); // <1>
|
||||
request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
|
||||
// end::create-index-request-waitForActiveShards
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
request = new CreateIndexRequest("twitter6");
|
||||
// tag::create-index-whole-source
|
||||
request.source("{\n" +
|
||||
" \"settings\" : {\n" +
|
||||
" \"number_of_shards\" : 1,\n" +
|
||||
" \"number_of_replicas\" : 0\n" +
|
||||
" },\n" +
|
||||
" \"mappings\" : {\n" +
|
||||
" \"tweet\" : {\n" +
|
||||
" \"properties\" : {\n" +
|
||||
" \"message\" : { \"type\" : \"text\" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" },\n" +
|
||||
" \"aliases\" : {\n" +
|
||||
" \"twitter_alias\" : {}\n" +
|
||||
" }\n" +
|
||||
"}", XContentType.JSON); // <1>
|
||||
// end::create-index-whole-source
|
||||
|
||||
// tag::create-index-execute
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(request);
|
||||
|
@ -208,8 +370,9 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
|
||||
{
|
||||
CreateIndexRequest request = new CreateIndexRequest("twitter");
|
||||
// tag::create-index-execute-async
|
||||
client.indices().createAsync(request, new ActionListener<CreateIndexResponse>() {
|
||||
|
||||
// tag::create-index-execute-listener
|
||||
ActionListener<CreateIndexResponse> listener = new ActionListener<CreateIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(CreateIndexResponse createIndexResponse) {
|
||||
// <1>
|
||||
|
@ -219,14 +382,18 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::create-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::create-index-execute-async
|
||||
client.indices().createAsync(request, listener); // <1>
|
||||
// end::create-index-execute-async
|
||||
|
||||
assertBusy(() -> {
|
||||
// TODO Use Indices Exist API instead once it exists
|
||||
Response response = client.getLowLevelClient().performRequest("HEAD", "twitter");
|
||||
assertTrue(RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode());
|
||||
});
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -258,6 +425,54 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
XContentType.JSON);
|
||||
// end::put-mapping-request-source
|
||||
|
||||
{
|
||||
//tag::put-mapping-map
|
||||
Map<String, Object> jsonMap = new HashMap<>();
|
||||
Map<String, Object> message = new HashMap<>();
|
||||
message.put("type", "text");
|
||||
Map<String, Object> properties = new HashMap<>();
|
||||
properties.put("message", message);
|
||||
Map<String, Object> tweet = new HashMap<>();
|
||||
tweet.put("properties", properties);
|
||||
jsonMap.put("tweet", tweet);
|
||||
request.source(jsonMap); // <1>
|
||||
//end::put-mapping-map
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
}
|
||||
{
|
||||
//tag::put-mapping-xcontent
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.startObject();
|
||||
{
|
||||
builder.startObject("tweet");
|
||||
{
|
||||
builder.startObject("properties");
|
||||
{
|
||||
builder.startObject("message");
|
||||
{
|
||||
builder.field("type", "text");
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
request.source(builder); // <1>
|
||||
//end::put-mapping-xcontent
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
}
|
||||
{
|
||||
//tag::put-mapping-shortcut
|
||||
request.source("message", "type=text"); // <1>
|
||||
//end::put-mapping-shortcut
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
// tag::put-mapping-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
|
@ -288,8 +503,9 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
|
||||
{
|
||||
PutMappingRequest request = new PutMappingRequest("twitter").type("tweet");
|
||||
// tag::put-mapping-execute-async
|
||||
client.indices().putMappingAsync(request, new ActionListener<PutMappingResponse>() {
|
||||
|
||||
// tag::put-mapping-execute-listener
|
||||
ActionListener<PutMappingResponse> listener = new ActionListener<PutMappingResponse>() {
|
||||
@Override
|
||||
public void onResponse(PutMappingResponse putMappingResponse) {
|
||||
// <1>
|
||||
|
@ -299,18 +515,22 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::put-mapping-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::put-mapping-execute-async
|
||||
client.indices().putMappingAsync(request, listener); // <1>
|
||||
// end::put-mapping-execute-async
|
||||
|
||||
assertBusy(() -> {
|
||||
// TODO Use Indices Exist API instead once it exists
|
||||
Response response = client.getLowLevelClient().performRequest("HEAD", "twitter");
|
||||
assertTrue(RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode());
|
||||
});
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testOpenIndex() throws IOException {
|
||||
public void testOpenIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
|
@ -336,7 +556,6 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
|
||||
// end::open-index-request-waitForActiveShards
|
||||
|
||||
|
||||
// tag::open-index-request-indicesOptions
|
||||
request.indicesOptions(IndicesOptions.strictExpandOpen()); // <1>
|
||||
// end::open-index-request-indicesOptions
|
||||
|
@ -352,8 +571,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
assertTrue(acknowledged);
|
||||
assertTrue(shardsAcked);
|
||||
|
||||
// tag::open-index-execute-async
|
||||
client.indices().openAsync(request, new ActionListener<OpenIndexResponse>() {
|
||||
// tag::open-index-execute-listener
|
||||
ActionListener<OpenIndexResponse> listener = new ActionListener<OpenIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(OpenIndexResponse openIndexResponse) {
|
||||
// <1>
|
||||
|
@ -363,8 +582,18 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::open-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::open-index-execute-async
|
||||
client.indices().openAsync(request, listener); // <1>
|
||||
// end::open-index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -381,7 +610,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
}
|
||||
}
|
||||
|
||||
public void testCloseIndex() throws IOException {
|
||||
public void testCloseIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
|
@ -416,8 +645,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
// end::close-index-response
|
||||
assertTrue(acknowledged);
|
||||
|
||||
// tag::close-index-execute-async
|
||||
client.indices().closeAsync(request, new ActionListener<CloseIndexResponse>() {
|
||||
// tag::close-index-execute-listener
|
||||
ActionListener<CloseIndexResponse> listener = new ActionListener<CloseIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(CloseIndexResponse closeIndexResponse) {
|
||||
// <1>
|
||||
|
@ -427,21 +656,301 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::close-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::close-index-execute-async
|
||||
client.indices().closeAsync(request, listener); // <1>
|
||||
// end::close-index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testExistsAlias() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")
|
||||
.alias(new Alias("alias")));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
// tag::close-index-notfound
|
||||
try {
|
||||
CloseIndexRequest request = new CloseIndexRequest("does_not_exist");
|
||||
client.indices().close(request);
|
||||
} catch (ElasticsearchException exception) {
|
||||
if (exception.status() == RestStatus.BAD_REQUEST) {
|
||||
// tag::exists-alias-request
|
||||
GetAliasesRequest request = new GetAliasesRequest();
|
||||
GetAliasesRequest requestWithAlias = new GetAliasesRequest("alias1");
|
||||
GetAliasesRequest requestWithAliases = new GetAliasesRequest(new String[]{"alias1", "alias2"});
|
||||
// end::exists-alias-request
|
||||
|
||||
// tag::exists-alias-request-alias
|
||||
request.aliases("alias"); // <1>
|
||||
// end::exists-alias-request-alias
|
||||
// tag::exists-alias-request-indices
|
||||
request.indices("index"); // <1>
|
||||
// end::exists-alias-request-indices
|
||||
|
||||
// tag::exists-alias-request-indicesOptions
|
||||
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
|
||||
// end::exists-alias-request-indicesOptions
|
||||
|
||||
// tag::exists-alias-request-local
|
||||
request.local(true); // <1>
|
||||
// end::exists-alias-request-local
|
||||
|
||||
// tag::exists-alias-execute
|
||||
boolean exists = client.indices().existsAlias(request);
|
||||
// end::exists-alias-execute
|
||||
assertTrue(exists);
|
||||
|
||||
// tag::exists-alias-listener
|
||||
ActionListener<Boolean> listener = new ActionListener<Boolean>() {
|
||||
@Override
|
||||
public void onResponse(Boolean exists) {
|
||||
// <1>
|
||||
}
|
||||
}
|
||||
// end::close-index-notfound
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::exists-alias-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::exists-alias-execute-async
|
||||
client.indices().existsAliasAsync(request, listener); // <1>
|
||||
// end::exists-alias-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public void testUpdateAliases() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
createIndexResponse = client.indices().create(new CreateIndexRequest("index2"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
createIndexResponse = client.indices().create(new CreateIndexRequest("index3"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
createIndexResponse = client.indices().create(new CreateIndexRequest("index4"));
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
// tag::update-aliases-request
|
||||
IndicesAliasesRequest request = new IndicesAliasesRequest(); // <1>
|
||||
AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1"); // <2>
|
||||
request.addAliasAction(aliasAction); // <3>
|
||||
// end::update-aliases-request
|
||||
|
||||
// tag::update-aliases-request2
|
||||
AliasActions addIndexAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1")
|
||||
.filter("{\"term\":{\"year\":2016}}"); // <1>
|
||||
AliasActions addIndicesAction = new AliasActions(AliasActions.Type.ADD).indices("index1", "index2").alias("alias2")
|
||||
.routing("1"); // <2>
|
||||
AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index("index3").alias("alias3"); // <3>
|
||||
AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index("index4"); // <4>
|
||||
// end::update-aliases-request2
|
||||
|
||||
// tag::update-aliases-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::update-aliases-request-timeout
|
||||
// tag::update-aliases-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::update-aliases-request-masterTimeout
|
||||
|
||||
// tag::update-aliases-execute
|
||||
IndicesAliasesResponse indicesAliasesResponse = client.indices().updateAliases(request);
|
||||
// end::update-aliases-execute
|
||||
|
||||
// tag::update-aliases-response
|
||||
boolean acknowledged = indicesAliasesResponse.isAcknowledged(); // <1>
|
||||
// end::update-aliases-response
|
||||
assertTrue(acknowledged);
|
||||
}
|
||||
{
|
||||
IndicesAliasesRequest request = new IndicesAliasesRequest(); // <1>
|
||||
AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("async"); // <2>
|
||||
request.addAliasAction(aliasAction);
|
||||
|
||||
// tag::update-aliases-execute-listener
|
||||
ActionListener<IndicesAliasesResponse> listener = new ActionListener<IndicesAliasesResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndicesAliasesResponse indicesAliasesResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::update-aliases-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::update-aliases-execute-async
|
||||
client.indices().updateAliasesAsync(request, listener); // <1>
|
||||
// end::update-aliases-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public void testShrinkIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
Map<String, Object> nodes = getAsMap("_nodes");
|
||||
String firstNode = ((Map<String, Object>) nodes.get("nodes")).keySet().iterator().next();
|
||||
createIndex("source_index", Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build());
|
||||
updateIndexSettings("source_index", Settings.builder().put("index.routing.allocation.require._name", firstNode)
|
||||
.put("index.blocks.write", true));
|
||||
}
|
||||
|
||||
// tag::shrink-index-request
|
||||
ResizeRequest request = new ResizeRequest("target_index","source_index"); // <1>
|
||||
// end::shrink-index-request
|
||||
|
||||
// tag::shrink-index-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::shrink-index-request-timeout
|
||||
// tag::shrink-index-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::shrink-index-request-masterTimeout
|
||||
// tag::shrink-index-request-waitForActiveShards
|
||||
request.getTargetIndexRequest().waitForActiveShards(2); // <1>
|
||||
request.getTargetIndexRequest().waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
|
||||
// end::shrink-index-request-waitForActiveShards
|
||||
// tag::shrink-index-request-settings
|
||||
request.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 2)); // <1>
|
||||
// end::shrink-index-request-settings
|
||||
// tag::shrink-index-request-aliases
|
||||
request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1>
|
||||
// end::shrink-index-request-aliases
|
||||
|
||||
// tag::shrink-index-execute
|
||||
ResizeResponse resizeResponse = client.indices().shrink(request);
|
||||
// end::shrink-index-execute
|
||||
|
||||
// tag::shrink-index-response
|
||||
boolean acknowledged = resizeResponse.isAcknowledged(); // <1>
|
||||
boolean shardsAcked = resizeResponse.isShardsAcknowledged(); // <2>
|
||||
// end::shrink-index-response
|
||||
assertTrue(acknowledged);
|
||||
assertTrue(shardsAcked);
|
||||
|
||||
// tag::shrink-index-execute-listener
|
||||
ActionListener<ResizeResponse> listener = new ActionListener<ResizeResponse>() {
|
||||
@Override
|
||||
public void onResponse(ResizeResponse resizeResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::shrink-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::shrink-index-execute-async
|
||||
client.indices().shrinkAsync(request, listener); // <1>
|
||||
// end::shrink-index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public void testSplitIndex() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createIndex("source_index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)
|
||||
.put("index.number_of_routing_shards", 4).build());
|
||||
updateIndexSettings("source_index", Settings.builder().put("index.blocks.write", true));
|
||||
}
|
||||
|
||||
// tag::split-index-request
|
||||
ResizeRequest request = new ResizeRequest("target_index","source_index"); // <1>
|
||||
request.setResizeType(ResizeType.SPLIT); // <2>
|
||||
// end::split-index-request
|
||||
|
||||
// tag::split-index-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::split-index-request-timeout
|
||||
// tag::split-index-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::split-index-request-masterTimeout
|
||||
// tag::split-index-request-waitForActiveShards
|
||||
request.getTargetIndexRequest().waitForActiveShards(2); // <1>
|
||||
request.getTargetIndexRequest().waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
|
||||
// end::split-index-request-waitForActiveShards
|
||||
// tag::split-index-request-settings
|
||||
request.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4)); // <1>
|
||||
// end::split-index-request-settings
|
||||
// tag::split-index-request-aliases
|
||||
request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1>
|
||||
// end::split-index-request-aliases
|
||||
|
||||
// tag::split-index-execute
|
||||
ResizeResponse resizeResponse = client.indices().split(request);
|
||||
// end::split-index-execute
|
||||
|
||||
// tag::split-index-response
|
||||
boolean acknowledged = resizeResponse.isAcknowledged(); // <1>
|
||||
boolean shardsAcked = resizeResponse.isShardsAcknowledged(); // <2>
|
||||
// end::split-index-response
|
||||
assertTrue(acknowledged);
|
||||
assertTrue(shardsAcked);
|
||||
|
||||
// tag::split-index-execute-listener
|
||||
ActionListener<ResizeResponse> listener = new ActionListener<ResizeResponse>() {
|
||||
@Override
|
||||
public void onResponse(ResizeResponse resizeResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::split-index-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::split-index-execute-async
|
||||
client.indices().splitAsync(request,listener); // <1>
|
||||
// end::split-index-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
|
@ -75,6 +76,7 @@ import java.util.Arrays;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
|
||||
|
@ -99,8 +101,8 @@ import static org.hamcrest.Matchers.greaterThan;
|
|||
*/
|
||||
public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@SuppressWarnings({ "unused", "unchecked" })
|
||||
public void testSearch() throws IOException {
|
||||
@SuppressWarnings({"unused", "unchecked"})
|
||||
public void testSearch() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
BulkRequest request = new BulkRequest();
|
||||
|
@ -174,8 +176,8 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
SearchResponse searchResponse = client.search(searchRequest);
|
||||
// end::search-execute
|
||||
|
||||
// tag::search-execute-async
|
||||
client.searchAsync(searchRequest, new ActionListener<SearchResponse>() {
|
||||
// tag::search-execute-listener
|
||||
ActionListener<SearchResponse> listener = new ActionListener<SearchResponse>() {
|
||||
@Override
|
||||
public void onResponse(SearchResponse searchResponse) {
|
||||
// <1>
|
||||
|
@ -185,9 +187,19 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::search-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::search-execute-async
|
||||
client.searchAsync(searchRequest, listener); // <1>
|
||||
// end::search-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
||||
// tag::search-response-1
|
||||
RestStatus status = searchResponse.status();
|
||||
TimeValue took = searchResponse.getTook();
|
||||
|
@ -343,7 +355,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({ "unused", "rawtypes" })
|
||||
@SuppressWarnings({"unused", "rawtypes"})
|
||||
public void testSearchRequestSuggestions() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
|
@ -449,6 +461,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testSearchRequestProfiling() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
|
@ -517,7 +530,8 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testScroll() throws IOException {
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testScroll() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
BulkRequest request = new BulkRequest();
|
||||
|
@ -587,8 +601,8 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
assertEquals(0, searchResponse.getFailedShards());
|
||||
assertEquals(3L, searchResponse.getHits().getTotalHits());
|
||||
|
||||
// tag::search-scroll-execute-async
|
||||
client.searchScrollAsync(scrollRequest, new ActionListener<SearchResponse>() {
|
||||
// tag::search-scroll-execute-listener
|
||||
ActionListener<SearchResponse> scrollListener = new ActionListener<SearchResponse>() {
|
||||
@Override
|
||||
public void onResponse(SearchResponse searchResponse) {
|
||||
// <1>
|
||||
|
@ -598,9 +612,19 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::search-scroll-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
scrollListener = new LatchedActionListener<>(scrollListener, latch);
|
||||
|
||||
// tag::search-scroll-execute-async
|
||||
client.searchScrollAsync(scrollRequest, scrollListener); // <1>
|
||||
// end::search-scroll-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
||||
// tag::clear-scroll-request
|
||||
ClearScrollRequest request = new ClearScrollRequest(); // <1>
|
||||
request.addScrollId(scrollId); // <2>
|
||||
|
@ -627,8 +651,8 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
assertTrue(success);
|
||||
assertThat(released, greaterThan(0));
|
||||
|
||||
// tag::clear-scroll-execute-async
|
||||
client.clearScrollAsync(request, new ActionListener<ClearScrollResponse>() {
|
||||
// tag::clear-scroll-execute-listener
|
||||
ActionListener<ClearScrollResponse> listener =new ActionListener<ClearScrollResponse>() {
|
||||
@Override
|
||||
public void onResponse(ClearScrollResponse clearScrollResponse) {
|
||||
// <1>
|
||||
|
@ -638,8 +662,18 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
};
|
||||
// end::clear-scroll-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch clearScrollLatch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, clearScrollLatch);
|
||||
|
||||
// tag::clear-scroll-execute-async
|
||||
client.clearScrollAsync(request, listener); // <1>
|
||||
// end::clear-scroll-execute-async
|
||||
|
||||
assertTrue(clearScrollLatch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
{
|
||||
// tag::search-scroll-example
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.plugins;
|
|||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
|
||||
import org.apache.lucene.search.spell.LevensteinDistance;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
@ -566,7 +567,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
|||
}
|
||||
|
||||
/** Load information about the plugin, and verify it can be installed with no errors. */
|
||||
private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch, Environment env) throws Exception {
|
||||
private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, boolean isBatch, Environment env) throws Exception {
|
||||
final PluginInfo info = PluginInfo.readFromProperties(pluginRoot);
|
||||
|
||||
// checking for existing version of the plugin
|
||||
|
@ -586,13 +587,6 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
|||
// check for jar hell before any copying
|
||||
jarHellCheck(info, pluginRoot, env.pluginsFile(), env.modulesFile());
|
||||
|
||||
// read optional security policy (extra permissions)
|
||||
// if it exists, confirm or warn the user
|
||||
Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policy)) {
|
||||
PluginSecurity.readPolicy(info, policy, terminal, env::tmpFile, isBatch);
|
||||
}
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
|
@ -663,15 +657,34 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
|||
pluginPaths.add(plugin);
|
||||
}
|
||||
}
|
||||
|
||||
// read optional security policy from each bundled plugin, and confirm all exceptions one time with user
|
||||
|
||||
Set<String> permissions = new HashSet<>();
|
||||
final List<PluginInfo> pluginInfos = new ArrayList<>();
|
||||
boolean hasNativeController = false;
|
||||
for (Path plugin : pluginPaths) {
|
||||
final PluginInfo info = verify(terminal, plugin, isBatch, env);
|
||||
final PluginInfo info = loadPluginInfo(terminal, plugin, isBatch, env);
|
||||
pluginInfos.add(info);
|
||||
installPluginSupportFiles(info, plugin, env.binFile().resolve(metaInfo.getName()),
|
||||
|
||||
hasNativeController |= info.hasNativeController();
|
||||
|
||||
Path policy = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policy)) {
|
||||
permissions.addAll(PluginSecurity.parsePermissions(policy, env.tmpFile()));
|
||||
}
|
||||
}
|
||||
PluginSecurity.confirmPolicyExceptions(terminal, permissions, hasNativeController, isBatch);
|
||||
|
||||
// move support files and rename as needed to prepare the exploded plugin for its final location
|
||||
for (int i = 0; i < pluginPaths.size(); ++i) {
|
||||
Path pluginPath = pluginPaths.get(i);
|
||||
PluginInfo info = pluginInfos.get(i);
|
||||
installPluginSupportFiles(info, pluginPath, env.binFile().resolve(metaInfo.getName()),
|
||||
env.configFile().resolve(metaInfo.getName()), deleteOnFailure);
|
||||
// ensure the plugin dir within the tmpRoot has the correct name
|
||||
if (plugin.getFileName().toString().equals(info.getName()) == false) {
|
||||
Files.move(plugin, plugin.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE);
|
||||
if (pluginPath.getFileName().toString().equals(info.getName()) == false) {
|
||||
Files.move(pluginPath, pluginPath.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE);
|
||||
}
|
||||
}
|
||||
movePlugin(tmpRoot, destination);
|
||||
|
@ -691,7 +704,14 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
|||
*/
|
||||
private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot,
|
||||
Environment env, List<Path> deleteOnFailure) throws Exception {
|
||||
final PluginInfo info = verify(terminal, tmpRoot, isBatch, env);
|
||||
final PluginInfo info = loadPluginInfo(terminal, tmpRoot, isBatch, env);
|
||||
// read optional security policy (extra permissions), if it exists, confirm or warn the user
|
||||
Path policy = tmpRoot.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policy)) {
|
||||
Set<String> permissions = PluginSecurity.parsePermissions(policy, env.tmpFile());
|
||||
PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch);
|
||||
}
|
||||
|
||||
final Path destination = env.pluginsFile().resolve(info.getName());
|
||||
deleteOnFailure.add(destination);
|
||||
|
||||
|
@ -810,8 +830,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
|||
KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
|
||||
if (keystore == null) {
|
||||
terminal.println("Elasticsearch keystore is required by plugin [" + info.getName() + "], creating...");
|
||||
keystore = KeyStoreWrapper.create(new char[0]);
|
||||
keystore.save(env.configFile());
|
||||
keystore = KeyStoreWrapper.create();
|
||||
keystore.save(env.configFile(), new char[0]);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.plugins;
|
|||
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
|
||||
import com.google.common.jimfs.Configuration;
|
||||
import com.google.common.jimfs.Jimfs;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
|
@ -45,7 +45,6 @@ import org.junit.After;
|
|||
import org.junit.Before;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.StringReader;
|
||||
|
@ -85,6 +84,7 @@ import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
|
|||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
|
@ -95,6 +95,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
private InstallPluginCommand defaultCommand;
|
||||
|
||||
private final Function<String, Path> temp;
|
||||
private final MockTerminal terminal = new MockTerminal();
|
||||
|
||||
private final FileSystem fs;
|
||||
private final boolean isPosix;
|
||||
|
@ -112,6 +113,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
System.setProperty("java.io.tmpdir", temp.apply("tmpdir").toString());
|
||||
}
|
||||
|
||||
@Override
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
|
@ -122,8 +124,10 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
}
|
||||
};
|
||||
defaultCommand = new InstallPluginCommand();
|
||||
terminal.reset();
|
||||
}
|
||||
|
||||
@Override
|
||||
@After
|
||||
@SuppressForbidden(reason = "resets java.io.tmpdir")
|
||||
public void tearDown() throws Exception {
|
||||
|
@ -213,7 +217,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
|
||||
/** creates a plugin .zip and returns the url for testing */
|
||||
static String createPluginUrl(String name, Path structure, String... additionalProps) throws IOException {
|
||||
return createPlugin(name, structure, false, additionalProps).toUri().toURL().toString();
|
||||
return createPlugin(name, structure, additionalProps).toUri().toURL().toString();
|
||||
}
|
||||
|
||||
/** creates an meta plugin .zip and returns the url for testing */
|
||||
|
@ -228,7 +232,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
static void writePlugin(String name, Path structure, boolean createSecurityPolicyFile, String... additionalProps) throws IOException {
|
||||
static void writePlugin(String name, Path structure, String... additionalProps) throws IOException {
|
||||
String[] properties = Stream.concat(Stream.of(
|
||||
"description", "fake desc",
|
||||
"name", name,
|
||||
|
@ -238,16 +242,23 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
"classname", "FakePlugin"
|
||||
), Arrays.stream(additionalProps)).toArray(String[]::new);
|
||||
PluginTestUtil.writePluginProperties(structure, properties);
|
||||
if (createSecurityPolicyFile) {
|
||||
String securityPolicyContent = "grant {\n permission java.lang.RuntimePermission \"setFactory\";\n};\n";
|
||||
Files.write(structure.resolve("plugin-security.policy"), securityPolicyContent.getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin";
|
||||
writeJar(structure.resolve("plugin.jar"), className);
|
||||
}
|
||||
|
||||
static Path createPlugin(String name, Path structure, boolean createSecurityPolicyFile, String... additionalProps) throws IOException {
|
||||
writePlugin(name, structure, createSecurityPolicyFile, additionalProps);
|
||||
static void writePluginSecurityPolicy(Path pluginDir, String... permissions) throws IOException {
|
||||
StringBuilder securityPolicyContent = new StringBuilder("grant {\n ");
|
||||
for (String permission : permissions) {
|
||||
securityPolicyContent.append("permission java.lang.RuntimePermission \"");
|
||||
securityPolicyContent.append(permission);
|
||||
securityPolicyContent.append("\";");
|
||||
}
|
||||
securityPolicyContent.append("\n};\n");
|
||||
Files.write(pluginDir.resolve("plugin-security.policy"), securityPolicyContent.toString().getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
|
||||
static Path createPlugin(String name, Path structure, String... additionalProps) throws IOException {
|
||||
writePlugin(name, structure, additionalProps);
|
||||
return writeZip(structure, "elasticsearch");
|
||||
}
|
||||
|
||||
|
@ -256,15 +267,13 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
return writeZip(structure, "elasticsearch");
|
||||
}
|
||||
|
||||
MockTerminal installPlugin(String pluginUrl, Path home) throws Exception {
|
||||
return installPlugin(pluginUrl, home, skipJarHellCommand);
|
||||
void installPlugin(String pluginUrl, Path home) throws Exception {
|
||||
installPlugin(pluginUrl, home, skipJarHellCommand);
|
||||
}
|
||||
|
||||
MockTerminal installPlugin(String pluginUrl, Path home, InstallPluginCommand command) throws Exception {
|
||||
void installPlugin(String pluginUrl, Path home, InstallPluginCommand command) throws Exception {
|
||||
Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build());
|
||||
MockTerminal terminal = new MockTerminal();
|
||||
command.execute(terminal, pluginUrl, true, env);
|
||||
return terminal;
|
||||
command.execute(terminal, pluginUrl, false, env);
|
||||
}
|
||||
|
||||
void assertMetaPlugin(String metaPlugin, String name, Path original, Environment env) throws IOException {
|
||||
|
@ -384,9 +393,9 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
Files.createDirectory(pluginDir.resolve("fake1"));
|
||||
writePlugin("fake1", pluginDir.resolve("fake1"), false);
|
||||
writePlugin("fake1", pluginDir.resolve("fake1"));
|
||||
Files.createDirectory(pluginDir.resolve("fake2"));
|
||||
writePlugin("fake2", pluginDir.resolve("fake2"), false);
|
||||
writePlugin("fake2", pluginDir.resolve("fake2"));
|
||||
String pluginZip = createMetaPluginUrl("my_plugins", pluginDir);
|
||||
installPlugin(pluginZip, env.v1());
|
||||
assertMetaPlugin("my_plugins", "fake1", pluginDir, env.v2());
|
||||
|
@ -489,9 +498,9 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Tuple<Path, Environment> environment = createEnv(fs, temp);
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
Files.createDirectory(pluginDir.resolve("fake1"));
|
||||
writePlugin("fake1", pluginDir.resolve("fake1"), false);
|
||||
writePlugin("fake1", pluginDir.resolve("fake1"));
|
||||
Files.createDirectory(pluginDir.resolve("fake2"));
|
||||
writePlugin("fake2", pluginDir.resolve("fake2"), false); // adds plugin.jar with Fake2Plugin
|
||||
writePlugin("fake2", pluginDir.resolve("fake2")); // adds plugin.jar with Fake2Plugin
|
||||
writeJar(pluginDir.resolve("fake2").resolve("other.jar"), "Fake2Plugin");
|
||||
String pluginZip = createMetaPluginUrl("my_plugins", pluginDir);
|
||||
IllegalStateException e = expectThrows(IllegalStateException.class,
|
||||
|
@ -556,7 +565,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Path metaDir = createPluginDir(temp);
|
||||
Path pluginDir = metaDir.resolve("fake");
|
||||
Files.createDirectory(pluginDir);
|
||||
writePlugin("fake", pluginDir, false);
|
||||
writePlugin("fake", pluginDir);
|
||||
Path binDir = pluginDir.resolve("bin");
|
||||
Files.createDirectory(binDir);
|
||||
Files.createFile(binDir.resolve("somescript"));
|
||||
|
@ -638,7 +647,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Path metaDir = createPluginDir(temp);
|
||||
Path pluginDir = metaDir.resolve("fake");
|
||||
Files.createDirectory(pluginDir);
|
||||
writePlugin("fake", pluginDir, false);
|
||||
writePlugin("fake", pluginDir);
|
||||
Path binDir = pluginDir.resolve("bin");
|
||||
Files.createDirectory(binDir);
|
||||
Files.createFile(binDir.resolve("somescript"));
|
||||
|
@ -752,7 +761,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Path metaDir = createPluginDir(temp);
|
||||
Path pluginDir = metaDir.resolve("fake");
|
||||
Files.createDirectory(pluginDir);
|
||||
writePlugin("fake", pluginDir, false);
|
||||
writePlugin("fake", pluginDir);
|
||||
Path configDir = pluginDir.resolve("config");
|
||||
Files.createDirectory(configDir);
|
||||
Files.write(configDir.resolve("custom.yml"), "new config".getBytes(StandardCharsets.UTF_8));
|
||||
|
@ -941,9 +950,9 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
Files.createDirectory(pluginDir.resolve("fake"));
|
||||
writePlugin("fake", pluginDir.resolve("fake"), false);
|
||||
writePlugin("fake", pluginDir.resolve("fake"));
|
||||
Files.createDirectory(pluginDir.resolve("other"));
|
||||
writePlugin("other", pluginDir.resolve("other"), false);
|
||||
writePlugin("other", pluginDir.resolve("other"));
|
||||
String metaZip = createMetaPluginUrl("meta", pluginDir);
|
||||
final UserException e = expectThrows(UserException.class,
|
||||
() -> installPlugin(metaZip, env.v1(), randomFrom(skipJarHellCommand, defaultCommand)));
|
||||
|
@ -957,15 +966,18 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
// if batch is enabled, we also want to add a security policy
|
||||
String pluginZip = createPlugin("fake", pluginDir, isBatch).toUri().toURL().toString();
|
||||
if (isBatch) {
|
||||
writePluginSecurityPolicy(pluginDir, "setFactory");
|
||||
}
|
||||
String pluginZip = createPlugin("fake", pluginDir).toUri().toURL().toString();
|
||||
skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2());
|
||||
}
|
||||
|
||||
public MockTerminal assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash,
|
||||
void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash,
|
||||
String shaExtension, Function<byte[], String> shaCalculator) throws Exception {
|
||||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
Path pluginZip = createPlugin(name, pluginDir, false);
|
||||
Path pluginZip = createPlugin(name, pluginDir);
|
||||
InstallPluginCommand command = new InstallPluginCommand() {
|
||||
@Override
|
||||
Path downloadZip(Terminal terminal, String urlString, Path tmpDir) throws IOException {
|
||||
|
@ -1000,9 +1012,8 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
// no jarhell check
|
||||
}
|
||||
};
|
||||
MockTerminal terminal = installPlugin(pluginId, env.v1(), command);
|
||||
installPlugin(pluginId, env.v1(), command);
|
||||
assertPlugin(name, pluginDir, env.v2());
|
||||
return terminal;
|
||||
}
|
||||
|
||||
public void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash) throws Exception {
|
||||
|
@ -1046,7 +1057,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
public void testMavenSha1Backcompat() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
|
||||
MessageDigest digest = MessageDigest.getInstance("SHA-1");
|
||||
MockTerminal terminal = assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", checksum(digest));
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", checksum(digest));
|
||||
assertTrue(terminal.getOutput(), terminal.getOutput().contains("sha512 not found, falling back to sha1"));
|
||||
}
|
||||
|
||||
|
@ -1138,8 +1149,8 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
|
||||
public void testKeystoreRequiredAlreadyExists() throws Exception {
|
||||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
KeyStoreWrapper keystore = KeyStoreWrapper.create(new char[0]);
|
||||
keystore.save(env.v2().configFile());
|
||||
KeyStoreWrapper keystore = KeyStoreWrapper.create();
|
||||
keystore.save(env.v2().configFile(), new char[0]);
|
||||
byte[] expectedBytes = Files.readAllBytes(KeyStoreWrapper.keystorePath(env.v2().configFile()));
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
String pluginZip = createPluginUrl("fake", pluginDir, "requires.keystore", "true");
|
||||
|
@ -1152,7 +1163,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
Path pluginDir = createPluginDir(temp);
|
||||
String pluginZip = createPluginUrl("fake", pluginDir, "requires.keystore", "true");
|
||||
MockTerminal terminal = installPlugin(pluginZip, env.v1());
|
||||
installPlugin(pluginZip, env.v1());
|
||||
assertTrue(Files.exists(KeyStoreWrapper.keystorePath(env.v2().configFile())));
|
||||
}
|
||||
|
||||
|
@ -1161,9 +1172,9 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
Path metaDir = createPluginDir(temp);
|
||||
Path pluginDir = metaDir.resolve("fake");
|
||||
Files.createDirectory(pluginDir);
|
||||
writePlugin("fake", pluginDir, false, "requires.keystore", "true");
|
||||
writePlugin("fake", pluginDir, "requires.keystore", "true");
|
||||
String metaZip = createMetaPluginUrl("my_plugins", metaDir);
|
||||
MockTerminal terminal = installPlugin(metaZip, env.v1());
|
||||
installPlugin(metaZip, env.v1());
|
||||
assertTrue(Files.exists(KeyStoreWrapper.keystorePath(env.v2().configFile())));
|
||||
}
|
||||
|
||||
|
@ -1180,4 +1191,45 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
return bytes -> MessageDigests.toHexString(digest.digest(bytes)) + s;
|
||||
}
|
||||
|
||||
public void testMetaPluginPolicyConfirmation() throws Exception {
|
||||
Tuple<Path, Environment> env = createEnv(fs, temp);
|
||||
Path metaDir = createPluginDir(temp);
|
||||
Path fake1Dir = metaDir.resolve("fake1");
|
||||
Files.createDirectory(fake1Dir);
|
||||
writePluginSecurityPolicy(fake1Dir, "setAccessible", "setFactory");
|
||||
writePlugin("fake1", fake1Dir);
|
||||
Path fake2Dir = metaDir.resolve("fake2");
|
||||
Files.createDirectory(fake2Dir);
|
||||
writePluginSecurityPolicy(fake2Dir, "setAccessible", "accessDeclaredMembers");
|
||||
writePlugin("fake2", fake2Dir);
|
||||
String pluginZip = createMetaPluginUrl("meta-plugin", metaDir);
|
||||
|
||||
// default answer, does not install
|
||||
terminal.addTextInput("");
|
||||
UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
|
||||
assertEquals("installation aborted by user", e.getMessage());
|
||||
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
|
||||
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
|
||||
assertThat(fileStream.collect(Collectors.toList()), empty());
|
||||
}
|
||||
|
||||
// explicitly do not install
|
||||
terminal.reset();
|
||||
terminal.addTextInput("n");
|
||||
e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
|
||||
assertEquals("installation aborted by user", e.getMessage());
|
||||
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
|
||||
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
|
||||
assertThat(fileStream.collect(Collectors.toList()), empty());
|
||||
}
|
||||
|
||||
// allow installation
|
||||
terminal.reset();
|
||||
terminal.addTextInput("y");
|
||||
installPlugin(pluginZip, env.v1());
|
||||
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
|
||||
assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2());
|
||||
assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
include::createindex.asciidoc[]
|
||||
|
||||
include::deleteindex.asciidoc[]
|
||||
|
||||
include::open_index.asciidoc[]
|
||||
|
||||
include::close_index.asciidoc[]
|
||||
|
||||
include::putmapping.asciidoc[]
|
||||
|
||||
include::_index.asciidoc[]
|
||||
|
||||
include::get.asciidoc[]
|
||||
|
||||
include::delete.asciidoc[]
|
||||
|
||||
include::update.asciidoc[]
|
||||
|
||||
include::bulk.asciidoc[]
|
||||
|
||||
include::search.asciidoc[]
|
||||
|
||||
include::scroll.asciidoc[]
|
||||
|
||||
include::main.asciidoc[]
|
|
@ -74,10 +74,28 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute]
|
|||
[[java-rest-high-document-bulk-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a bulk request requires both the `BulkRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `BulkRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `BulkResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument and contains a list of individual results for each
|
||||
operation that was executed. Note that one or more operations might have
|
|
@ -66,10 +66,28 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-execute]
|
|||
[[java-rest-high-document-delete-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a delete request requires both the `DeleteRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `DeleteRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `DeleteResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -97,10 +97,28 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-execute]
|
|||
[[java-rest-high-document-get-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a get request requires both the `GetRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `GetResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument.
|
||||
<2> Called in case of failure. The raised exception is provided as an argument.
|
|
@ -16,7 +16,8 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-request-string]
|
|||
<4> Document source provided as a `String`
|
||||
|
||||
==== Providing the document source
|
||||
The document source can be provided in different ways:
|
||||
The document source can be provided in different ways in addition to the
|
||||
`String` example shown above:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
|
@ -104,10 +105,28 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-execute]
|
|||
[[java-rest-high-document-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of an index request requires both the `IndexRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `IndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `IndexResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -185,10 +185,28 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-execute]
|
|||
[[java-rest-high-document-update-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of an update request requires both the `UpdateRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `UpdateRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `UpdateResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument.
|
||||
<2> Called in case of failure. The raised exception is provided as an argument.
|
|
@ -49,10 +49,28 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execut
|
|||
[[java-rest-high-close-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a close index request requires both the `CloseIndexRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `CloseIndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `CloseIndexResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -31,6 +31,30 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque
|
|||
<1> The type to define
|
||||
<2> The mapping for this type, provided as a JSON string
|
||||
|
||||
The mapping source can be provided in different ways in addition to the
|
||||
`String` example shown above:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-mappings-map]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as a `Map` which gets automatically converted
|
||||
to JSON format
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-mappings-xcontent]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as an `XContentBuilder` object, the Elasticsearch
|
||||
built-in helpers to generate JSON content
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-mappings-shortcut]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as `Object` key-pairs, which gets converted to
|
||||
JSON format
|
||||
|
||||
==== Index aliases
|
||||
Aliases can be set at index creation time
|
||||
|
||||
|
@ -40,6 +64,18 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque
|
|||
--------------------------------------------------
|
||||
<1> The alias to define
|
||||
|
||||
==== Providing the whole source
|
||||
|
||||
The whole source including all of its sections (mappings, settings and aliases)
|
||||
can also be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-whole-source]
|
||||
--------------------------------------------------
|
||||
<1> The source provided as a JSON string. It can also be provided as a `Map`
|
||||
or an `XContentBuilder`.
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
|
@ -77,10 +113,28 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-execu
|
|||
[[java-rest-high-create-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a create index request requires both the `CreateIndexRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `CreateIndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `CreateIndexResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -47,10 +47,28 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[delete-index-execu
|
|||
[[java-rest-high-delete-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a delete index request requires both the `DeleteIndexRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[delete-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `DeleteIndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `DeleteIndexResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[delete-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -0,0 +1,87 @@
|
|||
[[java-rest-high-exists-alias]]
|
||||
=== Exists Alias API
|
||||
|
||||
[[java-rest-high-exists-alias-request]]
|
||||
==== Exists Alias Request
|
||||
|
||||
The Exists Alias API uses `GetAliasesRequest` as its request object.
|
||||
One or more aliases can be optionally provided either at construction
|
||||
time or later on through the relevant setter method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-alias]
|
||||
--------------------------------------------------
|
||||
<1> One or more aliases to look for
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-indices]
|
||||
--------------------------------------------------
|
||||
<1> The index or indices that the alias is associated with
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-indicesOptions]
|
||||
--------------------------------------------------
|
||||
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
|
||||
how wildcard expressions are expanded
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-local]
|
||||
--------------------------------------------------
|
||||
<1> The `local` flag (defaults to `false`) controls whether the aliases need
|
||||
to be looked up in the local cluster state or in the cluster state held by
|
||||
the elected master node.
|
||||
|
||||
[[java-rest-high-exists-alias-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-exists-alias-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of an exists alias request requires both a `GetAliasesRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetAliasesRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for the `Boolean` response looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-exists-alias-response]]
|
||||
==== Exists Alias Response
|
||||
|
||||
The Exists Alias API returns a `boolean` that indicates whether the provided
|
||||
alias (or aliases) was found or not.
|
|
@ -0,0 +1,71 @@
|
|||
[[java-rest-high-indices-exists]]
|
||||
=== Indices Exists API
|
||||
|
||||
[[java-rest-high-indices-exists-request]]
|
||||
==== Indices Exists Request
|
||||
|
||||
The high-level REST client uses a `GetIndexRequest` for Indices Exists API. The index name (or indices' names) are required.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-request]
|
||||
--------------------------------------------------
|
||||
<1> Index
|
||||
|
||||
[[java-rest-high-indices-exists-optional-args]]
|
||||
==== Optional arguments
|
||||
Indices Exists API also accepts the following optional arguments, through a `GetIndexRequest`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-request-optionals]
|
||||
--------------------------------------------------
|
||||
<1> Whether to return local information or retrieve the state from master node
|
||||
<2> Return result in a format suitable for humans
|
||||
<3> Whether to return all default settings for each of the indices
|
||||
<4> Return settings in flat format
|
||||
<5> Controls how unavailable indices are resolved and how wildcard expressions are expanded
|
||||
|
||||
[[java-rest-high-indices-sync]]
|
||||
==== Synchronous Execution
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-response]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-indices-async]]
|
||||
==== Asynchronous Execution
|
||||
The asynchronous execution of an indices exists request requires both the
|
||||
`GetIndexRequest` instance and an `ActionListener` instance to be passed
|
||||
to the asynchronous method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetIndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for the Indices Exists API looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-indices-exists-response]]
|
||||
==== Response
|
||||
The response is a `boolean` value, indicating whether the index (or indices) exist:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-response]
|
||||
--------------------------------------------------
|
|
@ -58,10 +58,28 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute
|
|||
[[java-rest-high-open-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of an open index request requires both the `OpenIndexRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `OpenIndexRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `OpenIndexResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -20,7 +20,32 @@ A description of the fields to create on the mapping; if not defined, the mappin
|
|||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-source]
|
||||
--------------------------------------------------
|
||||
<1> The mapping source
|
||||
<1> The mapping source provided as a `String`
|
||||
|
||||
==== Providing the mapping source
|
||||
The mapping source can be provided in different ways in addition to
|
||||
the `String` example shown above:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-map]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as a `Map` which gets automatically converted
|
||||
to JSON format
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-xcontent]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as an `XContentBuilder` object, the Elasticsearch
|
||||
built-in helpers to generate JSON content
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-shortcut]
|
||||
--------------------------------------------------
|
||||
<1> Mapping source provided as `Object` key-value pairs, which get converted to
|
||||
JSON format
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
@ -50,10 +75,28 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execut
|
|||
[[java-rest-high-put-mapping-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a put mappings request requires both the `PutMappingRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `PutMappingRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `PutMappingResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -0,0 +1,108 @@
|
|||
[[java-rest-high-shrink-index]]
|
||||
=== Shrink Index API
|
||||
|
||||
[[java-rest-high-shrink-index-request]]
|
||||
==== Resize Request
|
||||
|
||||
The Shrink API requires a `ResizeRequest` instance.
|
||||
A `ResizeRequest` requires two string arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request]
|
||||
--------------------------------------------------
|
||||
<1> The target index (first argument) to shrink the source index (second argument) into
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for all the nodes to acknowledge the index is opened
|
||||
as a `TimeValue`
|
||||
<2> Timeout to wait for all the nodes to acknowledge the index is opened
|
||||
as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-waitForActiveShards]
|
||||
--------------------------------------------------
|
||||
<1> The number of active shard copies to wait for before the shrink index API
|
||||
returns a response, as an `int`
|
||||
<2> The number of active shard copies to wait for before the shrink index API
|
||||
returns a response, as an `ActiveShardCount`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-settings]
|
||||
--------------------------------------------------
|
||||
<1> The settings to apply to the target index, which include the number of
|
||||
shards to create for it
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-aliases]
|
||||
--------------------------------------------------
|
||||
<1> The aliases to associate the target index with
|
||||
|
||||
[[java-rest-high-shrink-index-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-shrink-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a shrink index request requires both the `ResizeRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `ResizeRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `ResizeResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-shrink-index-response]]
|
||||
==== Shrink Index Response
|
||||
|
||||
The returned `ResizeResponse` allows you to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-response]
|
||||
--------------------------------------------------
|
||||
<1> Indicates whether all of the nodes have acknowledged the request
|
||||
<2> Indicates whether the requisite number of shard copies were started for
|
||||
each shard in the index before timing out
|
||||
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
[[java-rest-high-split-index]]
|
||||
=== Split Index API
|
||||
|
||||
[[java-rest-high-split-index-request]]
|
||||
==== Resize Request
|
||||
|
||||
The Split API requires a `ResizeRequest` instance.
|
||||
A `ResizeRequest` requires two string arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request]
|
||||
--------------------------------------------------
|
||||
<1> The target index (first argument) to split the source index (second argument) into
|
||||
<2> The resize type needs to be set to `SPLIT`
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for all the nodes to acknowledge the index is opened
|
||||
as a `TimeValue`
|
||||
<2> Timeout to wait for all the nodes to acknowledge the index is opened
|
||||
as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-waitForActiveShards]
|
||||
--------------------------------------------------
|
||||
<1> The number of active shard copies to wait for before the split index API
|
||||
returns a response, as an `int`.
|
||||
<2> The number of active shard copies to wait for before the split index API
|
||||
returns a response, as an `ActiveShardCount`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-settings]
|
||||
--------------------------------------------------
|
||||
<1> The settings to apply to the target index, which include the number of
|
||||
shards to create for it.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-request-aliases]
|
||||
--------------------------------------------------
|
||||
<1> The aliases to associate the target index with.
|
||||
|
||||
[[java-rest-high-split-index-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-split-index-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a split index request requires both the `ResizeRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `ResizeRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `ResizeResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-split-index-response]]
|
||||
==== Split Index Response
|
||||
|
||||
The returned `ResizeResponse` allows you to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[split-index-response]
|
||||
--------------------------------------------------
|
||||
<1> Indicates whether all of the nodes have acknowledged the request
|
||||
<2> Indicates whether the requisite number of shard copies were started for
|
||||
each shard in the index before timing out
|
||||
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
[[java-rest-high-update-aliases]]
|
||||
=== Update Aliases API
|
||||
|
||||
[[java-rest-high-update-aliases-request]]
|
||||
==== Indices Aliases Request
|
||||
|
||||
The Update Aliases API allows aliasing an index with a name, with all APIs
|
||||
automatically converting the alias name to the actual index name.
|
||||
|
||||
An `IndicesAliasesRequest` must have at least one `AliasActions`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request]
|
||||
--------------------------------------------------
|
||||
<1> Creates an `IndicesAliasesRequest`
|
||||
<2> Creates an `AliasActions` that aliases index `test1` with `alias1`
|
||||
<3> Adds the alias action to the request
|
||||
|
||||
The following action types are supported: `add` - alias an index, `remove` -
|
||||
removes the alias associated with the index, and `remove_index` - deletes the
|
||||
index.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request2]
|
||||
--------------------------------------------------
|
||||
<1> Creates an alias `alias1` with an optional filter on field `year`
|
||||
<2> Creates an alias `alias2` associated with two indices and with an optional routing
|
||||
<3> Removes the associated alias `alias3`
|
||||
<4> `remove_index` is just like <<java-rest-high-delete-index>>
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for all the nodes to acknowledge the operation as a `TimeValue`
|
||||
<2> Timeout to wait for all the nodes to acknowledge the operation as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
[[java-rest-high-update-aliases-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-update-aliases-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of an update index aliases request requires both the `IndicesAliasesRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `IndicesAliasesRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `IndicesAliasesResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-update-aliases-response]]
|
||||
==== Indices Aliases Response
|
||||
|
||||
The returned `IndicesAliasesResponse` allows you to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-response]
|
||||
--------------------------------------------------
|
||||
<1> Indicates whether all of the nodes have acknowledged the request
|
|
@ -28,5 +28,5 @@ to compute with a search query in <<java-rest-high-document-search-request-build
|
|||
The <<java-rest-high-aggregation-builders, Building Aggregations>> page gives a list of all available
|
||||
aggregations with their corresponding `AggregationBuilder` objects and `AggregationBuilders` helper methods.
|
||||
|
||||
include::builders/queries.asciidoc[]
|
||||
include::builders/aggs.asciidoc[]
|
||||
include::query-builders.asciidoc[]
|
||||
include::aggs-builders.asciidoc[]
|
||||
|
|
|
@ -84,10 +84,28 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-scroll-execute-syn
|
|||
[[java-rest-high-search-scroll-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a search scroll request requires both the `SearchScrollRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[search-scroll-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `SearchScrollRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `SearchResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[search-scroll-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
@ -162,10 +180,28 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[clear-scroll-execute]
|
|||
[[java-rest-high-clear-scroll-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a clear scroll request requires both the `ClearScrollRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[clear-scroll-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `ClearScrollRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `ClearScrollResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[clear-scroll-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
|
@ -241,15 +241,29 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute]
|
|||
[[java-rest-high-document-search-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
|
||||
Executing a `SearchRequest` can also be done in an asynchronous fashion so that
|
||||
the client can return directly. Users need to specify how the response or
|
||||
potential failures will be handled by passing in appropriate listeners:
|
||||
potential failures will be handled by passing the request and a listener to the
|
||||
asynchronous search method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `SearchRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `SearchResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed.
|
||||
<2> Called when the whole `SearchRequest` fails.
|
||||
|
|
@ -1,30 +1,73 @@
|
|||
[[java-rest-high-supported-apis]]
|
||||
== Supported APIs
|
||||
|
||||
The Java High Level REST Client supports the following APIs:
|
||||
== Document APIs
|
||||
|
||||
Indices APIs::
|
||||
* <<java-rest-high-create-index>>
|
||||
* <<java-rest-high-delete-index>>
|
||||
* <<java-rest-high-open-index>>
|
||||
* <<java-rest-high-close-index>>
|
||||
* <<java-rest-high-put-mapping>>
|
||||
The Java High Level REST Client supports the following Document APIs:
|
||||
|
||||
[[single-doc]]
|
||||
Single document APIs::
|
||||
* <<java-rest-high-document-index>>
|
||||
* <<java-rest-high-document-get>>
|
||||
* <<java-rest-high-document-delete>>
|
||||
* <<java-rest-high-document-update>>
|
||||
|
||||
Multi document APIs::
|
||||
[[multi-doc]]
|
||||
Multi-document APIs::
|
||||
* <<java-rest-high-document-bulk>>
|
||||
|
||||
Search APIs::
|
||||
include::document/index.asciidoc[]
|
||||
include::document/get.asciidoc[]
|
||||
include::document/delete.asciidoc[]
|
||||
include::document/update.asciidoc[]
|
||||
include::document/bulk.asciidoc[]
|
||||
|
||||
== Search APIs
|
||||
|
||||
The Java High Level REST Client supports the following Search APIs:
|
||||
|
||||
* <<java-rest-high-search>>
|
||||
* <<java-rest-high-search-scroll>>
|
||||
* <<java-rest-high-clear-scroll>>
|
||||
|
||||
Miscellaneous APIs::
|
||||
include::search/search.asciidoc[]
|
||||
include::search/scroll.asciidoc[]
|
||||
|
||||
== Miscellaneous APIs
|
||||
|
||||
The Java High Level REST Client supports the following Miscellaneous APIs:
|
||||
|
||||
* <<java-rest-high-main>>
|
||||
|
||||
include::apis/index.asciidoc[]
|
||||
include::miscellaneous/main.asciidoc[]
|
||||
|
||||
== Indices APIs
|
||||
|
||||
The Java High Level REST Client supports the following Indices APIs:
|
||||
|
||||
Index Management::
|
||||
* <<java-rest-high-create-index>>
|
||||
* <<java-rest-high-delete-index>>
|
||||
* <<java-rest-high-indices-exists>>
|
||||
* <<java-rest-high-open-index>>
|
||||
* <<java-rest-high-close-index>>
|
||||
* <<java-rest-high-shrink-index>>
|
||||
* <<java-rest-high-split-index>>
|
||||
|
||||
Mapping Management::
|
||||
* <<java-rest-high-put-mapping>>
|
||||
|
||||
Alias Management::
|
||||
* <<java-rest-high-update-aliases>>
|
||||
* <<java-rest-high-exists-alias>>
|
||||
|
||||
include::indices/create_index.asciidoc[]
|
||||
include::indices/delete_index.asciidoc[]
|
||||
include::indices/indices_exists.asciidoc[]
|
||||
include::indices/open_index.asciidoc[]
|
||||
include::indices/close_index.asciidoc[]
|
||||
include::indices/shrink_index.asciidoc[]
|
||||
include::indices/split_index.asciidoc[]
|
||||
include::indices/put_mapping.asciidoc[]
|
||||
include::indices/update_aliases.asciidoc[]
|
||||
include::indices/exists_alias.asciidoc[]
|
||||
|
||||
|
|
|
@ -5,8 +5,10 @@
|
|||
|
||||
The Elasticsearch repository contains examples of:
|
||||
|
||||
* a https://github.com/elastic/elasticsearch/tree/master/plugins/jvm-example[Java plugin]
|
||||
which contains Java code.
|
||||
* a https://github.com/elastic/elasticsearch/tree/master/plugins/custom-settings[Java plugin]
|
||||
which contains a plugin with custom settings.
|
||||
* a https://github.com/elastic/elasticsearch/tree/master/plugins/rest-handler[Java plugin]
|
||||
which contains a plugin that registers a Rest handler.
|
||||
* a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/rescore[Java plugin]
|
||||
which contains a rescore plugin.
|
||||
* a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/script-expert-scoring[Java plugin]
|
||||
|
|
|
@ -66,6 +66,7 @@ built-in authentication mechanism, you can authenticate on the Storage service u
|
|||
https://cloud.google.com/iam/docs/overview#service_account[Service Account] file.
|
||||
|
||||
To create a service account file:
|
||||
|
||||
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
|
||||
2. Select your project
|
||||
3. Go to the https://console.cloud.google.com/permissions[Permission] tab
|
||||
|
|
|
@ -21,6 +21,8 @@ include::bucket/adjacency-matrix-aggregation.asciidoc[]
|
|||
|
||||
include::bucket/children-aggregation.asciidoc[]
|
||||
|
||||
include::bucket/composite-aggregation.asciidoc[]
|
||||
|
||||
include::bucket/datehistogram-aggregation.asciidoc[]
|
||||
|
||||
include::bucket/daterange-aggregation.asciidoc[]
|
||||
|
@ -57,5 +59,3 @@ include::bucket/significanttext-aggregation.asciidoc[]
|
|||
|
||||
include::bucket/terms-aggregation.asciidoc[]
|
||||
|
||||
include::bucket/composite-aggregation.asciidoc[]
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[[search-aggregations-bucket-composite-aggregation]]
|
||||
=== Composite Aggregation
|
||||
|
||||
experimental[]
|
||||
beta[]
|
||||
|
||||
A multi-bucket aggregation that creates composite buckets from different sources.
|
||||
|
||||
|
|
|
@ -105,6 +105,7 @@ Response:
|
|||
By default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can
|
||||
change this default behaviour by setting the `size` parameter.
|
||||
|
||||
[[search-aggregations-bucket-terms-aggregation-size]]
|
||||
==== Size
|
||||
|
||||
The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[[analysis-synonym-graph-tokenfilter]]
|
||||
=== Synonym Graph Token Filter
|
||||
|
||||
experimental[This functionality is marked as experimental in Lucene]
|
||||
beta[]
|
||||
|
||||
The `synonym_graph` token filter allows to easily handle synonyms,
|
||||
including multi-word synonyms correctly during the analysis process.
|
||||
|
|
|
@ -27,10 +27,10 @@ U7321H6 discovery-gce {version} The Google Compute Engine (GCE) Discov
|
|||
U7321H6 ingest-attachment {version} Ingest processor that uses Apache Tika to extract contents
|
||||
U7321H6 ingest-geoip {version} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database
|
||||
U7321H6 ingest-user-agent {version} Ingest processor that extracts information from a user agent
|
||||
U7321H6 jvm-example {version} Demonstrates all the pluggable Java entry points in Elasticsearch
|
||||
U7321H6 mapper-murmur3 {version} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index.
|
||||
U7321H6 mapper-size {version} The Mapper Size plugin allows document to record their uncompressed size at index time.
|
||||
U7321H6 store-smb {version} The Store SMB plugin adds support for SMB stores.
|
||||
U7321H6 transport-nio {version} The nio transport.
|
||||
------------------------------------------------------------------------------
|
||||
// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ _cat]
|
||||
|
||||
|
|
|
@ -2,10 +2,14 @@
|
|||
== Multi Get API
|
||||
|
||||
Multi GET API allows to get multiple documents based on an index, type
|
||||
(optional) and id (and possibly routing). The response includes a `docs`
|
||||
array with all the fetched documents, each element similar in structure
|
||||
to a document provided by the <<docs-get,get>>
|
||||
API. Here is an example:
|
||||
(optional) and id (and possibly routing). The response includes a `docs` array
|
||||
with all the fetched documents in order corresponding to the original multi-get
|
||||
request (if there was a failure for a specific get, an object containing this
|
||||
error is included in place in the response instead). The structure of a
|
||||
successful get is similar in structure to a document provided by the
|
||||
<<docs-get,get>> API.
|
||||
|
||||
Here is an example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -35,6 +35,44 @@ several times slower and <<mapping-parent-field,parent-child>> relations can mak
|
|||
queries hundreds of times slower. So if the same questions can be answered without
|
||||
joins by denormalizing documents, significant speedups can be expected.
|
||||
|
||||
[float]
|
||||
=== Search as few fields as possible
|
||||
|
||||
The more fields a <<query-dsl-query-string-query,`query_string`>> or
|
||||
<<query-dsl-multi-match-query,`multi_match`>> query targets, the slower it is.
|
||||
A common technique to improve search speed over multiple fields is to copy
|
||||
their values into a single field at index time, and then use this field at
|
||||
search time. This can be automated with the <<copy-to,`copy_to`>> directive of
|
||||
mappings without having to change the source of documents. Here is an example
|
||||
of an index containing movies that optimizes queries that search over both the
|
||||
name and the plot of the movie by indexing both values into the `name_and_plot`
|
||||
field.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT movies
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"name_and_plot": {
|
||||
"type": "text"
|
||||
},
|
||||
"name": {
|
||||
"type": "text",
|
||||
"copy_to": "name_and_plot"
|
||||
},
|
||||
"plot": {
|
||||
"type": "text",
|
||||
"copy_to": "name_and_plot"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
[float]
|
||||
=== Pre-index data
|
||||
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
[[elasticsearch-reference]]
|
||||
= Elasticsearch Reference
|
||||
|
||||
:include-xpack: true
|
||||
:es-test-dir: {docdir}/../src/test
|
||||
:plugins-examples-dir: {docdir}/../../plugins/examples
|
||||
:xes-repo-dir: {docdir}/../../../elasticsearch-extra/x-pack-elasticsearch/docs/{lang}
|
||||
:es-repo-dir: {docdir}
|
||||
|
||||
|
||||
include::../Versions.asciidoc[]
|
||||
include::{xes-repo-dir}/index.asciidoc[]
|
|
@ -12,3 +12,6 @@ HEAD twitter
|
|||
|
||||
The HTTP status code indicates if the index exists or not. A `404` means
|
||||
it does not exist, and `200` means it does.
|
||||
|
||||
IMPORTANT: This request does not distinguish between an index and an alias,
|
||||
i.e. status code `200` is also returned if an alias exists with that name.
|
||||
|
|
|
@ -1016,7 +1016,7 @@ understands this to mean `2016-04-01` as is explained in the <<date-math-index-n
|
|||
| `field` | yes | - | The field to get the date or timestamp from.
|
||||
| `index_name_prefix` | no | - | A prefix of the index name to be prepended before the printed date.
|
||||
| `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).
|
||||
| `date_formats ` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
|
||||
| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
|
||||
| `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.
|
||||
| `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.
|
||||
| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. A valid Joda pattern is expected here.
|
||||
|
|
|
@ -50,3 +50,6 @@ GET my_index/_search
|
|||
|
||||
IMPORTANT: The `null_value` needs to be the same datatype as the field. For
|
||||
instance, a `long` field cannot have a string `null_value`.
|
||||
|
||||
NOTE: The `null_value` only influences how data is indexed, it doesn't modify
|
||||
the `_source` document.
|
||||
|
|
|
@ -62,8 +62,10 @@ The following parameters are accepted by `keyword` fields:
|
|||
|
||||
<<ignore-above,`ignore_above`>>::
|
||||
|
||||
Do not index any string longer than this value. Defaults to
|
||||
`2147483647` so that all values would be accepted.
|
||||
Do not index any string longer than this value. Defaults to `2147483647`
|
||||
so that all values would be accepted. Note, however, that the default
|
||||
dynamic mapping rules create a sub `keyword` field that overrides this
|
||||
default by setting `ignore_above: 256`.
|
||||
|
||||
<<mapping-index,`index`>>::
|
||||
|
||||
|
|
|
@ -47,6 +47,11 @@ PUT range_index/_doc/1
|
|||
--------------------------------------------------
|
||||
//CONSOLE
|
||||
|
||||
<1> `date_range` types accept the same field parameters defined by the <<date, `date`>> type.
|
||||
<2> Example indexing a meeting with 10 to 20 attendees.
|
||||
<3> Date ranges accept the same format as described in <<ranges-on-dates, date range queries>>.
|
||||
<4> Example date range using date time stamp. This also accepts <<date-math, date math>> formatting, or "now" for system time.
|
||||
|
||||
The following is an example of a <<query-dsl-term-query, term query>> on the `integer_range` field named "expected_attendees".
|
||||
|
||||
[source,js]
|
||||
|
@ -102,7 +107,6 @@ The result produced by the above query.
|
|||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"took": 13/"took" : $body.took/]
|
||||
|
||||
|
||||
The following is an example of a `date_range` query over the `date_range` field named "time_frame".
|
||||
|
||||
[source,js]
|
||||
|
@ -111,10 +115,10 @@ GET range_index/_search
|
|||
{
|
||||
"query" : {
|
||||
"range" : {
|
||||
"time_frame" : { <5>
|
||||
"time_frame" : { <1>
|
||||
"gte" : "2015-10-31",
|
||||
"lte" : "2015-11-01",
|
||||
"relation" : "within" <6>
|
||||
"relation" : "within" <2>
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -123,12 +127,8 @@ GET range_index/_search
|
|||
// CONSOLE
|
||||
// TEST[setup:range_index]
|
||||
|
||||
<1> `date_range` types accept the same field parameters defined by the <<date, `date`>> type.
|
||||
<2> Example indexing a meeting with 10 to 20 attendees.
|
||||
<3> Date ranges accept the same format as described in <<ranges-on-dates, date range queries>>.
|
||||
<4> Example date range using date time stamp. This also accepts <<date-math, date math>> formatting, or "now" for system time.
|
||||
<5> Range queries work the same as described in <<query-dsl-range-query, range query>>.
|
||||
<6> Range queries over range <<mapping-types, fields>> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`,
|
||||
<1> Range queries work the same as described in <<query-dsl-range-query, range query>>.
|
||||
<2> Range queries over range <<mapping-types, fields>> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`,
|
||||
`INTERSECTS` (default).
|
||||
|
||||
This query produces a similar result:
|
||||
|
|
|
@ -89,6 +89,13 @@ The following parameters are accepted by `text` fields:
|
|||
What information should be stored in the index, for search and highlighting purposes.
|
||||
Defaults to `positions`.
|
||||
|
||||
<<index-prefix-config,`index_prefix`>>::
|
||||
|
||||
If enabled, term prefixes of between 2 and 5 characters are indexed into a
|
||||
separate field. This allows prefix searches to run more efficiently, at
|
||||
the expense of a larger index. Accepts an
|
||||
<<index-prefix-config,`index-prefix configuration block`>>
|
||||
|
||||
<<norms,`norms`>>::
|
||||
|
||||
Whether field-length should be taken into account when scoring queries.
|
||||
|
@ -128,3 +135,32 @@ The following parameters are accepted by `text` fields:
|
|||
Whether term vectors should be stored for an <<mapping-index,`analyzed`>>
|
||||
field. Defaults to `no`.
|
||||
|
||||
[[index-prefix-config]]
|
||||
==== Index Prefix configuration
|
||||
|
||||
Text fields may also index term prefixes to speed up prefix searches. The `index_prefix`
|
||||
parameter is configured as below. Either or both of `min_chars` and `max_chars` may be omitted.
|
||||
Both values are treated as inclusive.
|
||||
|
||||
[source,js]
|
||||
--------------------------------
|
||||
PUT my_index
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"full_name": {
|
||||
"type": "text",
|
||||
"index_prefix" : {
|
||||
"min_chars" : 1, <1>
|
||||
"max_chars" : 10 <2>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------
|
||||
// CONSOLE
|
||||
<1> `min_chars` must be greater than zero, defaults to 2
|
||||
<2> `max_chars` must be greater than or equal to `min_chars` and less than 20, defaults to 5
|
|
@ -31,6 +31,8 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
|
|||
* <<breaking_70_plugins_changes>>
|
||||
* <<breaking_70_analysis_changes>>
|
||||
* <<breaking_70_api_changes>>
|
||||
* <<breaking_70_java_changes>>
|
||||
* <<breaking_70_settings_changes>>
|
||||
|
||||
|
||||
include::migrate_7_0/aggregations.asciidoc[]
|
||||
|
@ -41,3 +43,5 @@ include::migrate_7_0/mappings.asciidoc[]
|
|||
include::migrate_7_0/search.asciidoc[]
|
||||
include::migrate_7_0/plugins.asciidoc[]
|
||||
include::migrate_7_0/api.asciidoc[]
|
||||
include::migrate_7_0/java.asciidoc[]
|
||||
include::migrate_7_0/settings.asciidoc[]
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
[[breaking_70_java_changes]]
|
||||
=== Java API changes
|
||||
|
||||
==== `isShardsAcked` deprecated in `6.2` has been removed
|
||||
|
||||
`isShardsAcked` has been replaced by `isShardsAcknowledged` in
|
||||
`CreateIndexResponse`, `RolloverResponse` and
|
||||
`CreateIndexClusterStateUpdateResponse`.
|
|
@ -0,0 +1,8 @@
|
|||
[[breaking_70_settings_changes]]
|
||||
|
||||
=== Settings changes
|
||||
|
||||
==== Percolator
|
||||
|
||||
* The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of
|
||||
the `index.percolator.map_unmapped_fields_as_text` setting.
|
|
@ -73,10 +73,6 @@ The modules in this section are:
|
|||
Configure the transport networking layer, used internally by Elasticsearch
|
||||
to communicate between nodes.
|
||||
|
||||
<<modules-tribe,Tribe nodes>>::
|
||||
|
||||
A tribe node joins one or more clusters and acts as a federated
|
||||
client across them.
|
||||
|
||||
<<modules-cross-cluster-search, Cross cluster Search>>::
|
||||
|
||||
|
@ -110,6 +106,4 @@ include::modules/threadpool.asciidoc[]
|
|||
|
||||
include::modules/transport.asciidoc[]
|
||||
|
||||
include::modules/tribe.asciidoc[]
|
||||
|
||||
include::modules/cross-cluster-search.asciidoc[]
|
||||
|
|
|
@ -65,4 +65,3 @@ PUT _cluster/settings
|
|||
}
|
||||
------------------------
|
||||
// CONSOLE
|
||||
// TEST[skip:indexes don't assign]
|
||||
|
|
|
@ -97,4 +97,3 @@ PUT _cluster/settings
|
|||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
== Cross Cluster Search
|
||||
|
||||
The _cross cluster search_ feature allows any node to act as a federated client across
|
||||
multiple clusters. In contrast to the <<modules-tribe,tribe node>> feature, a cross cluster search node won't
|
||||
join the remote cluster, instead it connects to a remote cluster in a light fashion in order to execute
|
||||
multiple clusters. A cross cluster search node won't join the remote cluster, instead
|
||||
it connects to a remote cluster in a light fashion in order to execute
|
||||
federated search requests.
|
||||
|
||||
Cross cluster search works by configuring a remote cluster in the cluster state and connecting only to a
|
||||
|
|
|
@ -171,9 +171,8 @@ settings, but may be further configured independently:
|
|||
TCP Transport::
|
||||
|
||||
Used for communication between nodes in the cluster, by the Java
|
||||
{javaclient}/transport-client.html[Transport client] and by the
|
||||
<<modules-tribe,Tribe node>>. See the <<modules-transport,Transport module>>
|
||||
for more information.
|
||||
{javaclient}/transport-client.html[Transport client].
|
||||
See the <<modules-transport,Transport module>> for more information.
|
||||
|
||||
HTTP::
|
||||
|
||||
|
|
|
@ -35,17 +35,6 @@ and enrich the document before indexing. With a heavy ingest load, it makes
|
|||
sense to use dedicated ingest nodes and to mark the master and data nodes as
|
||||
`node.ingest: false`.
|
||||
|
||||
<<modules-tribe,Tribe node>>::
|
||||
|
||||
A tribe node, configured via the `tribe.*` settings, is a special type of
|
||||
coordinating only node that can connect to multiple clusters and perform
|
||||
search and other operations across all connected clusters.
|
||||
|
||||
By default a node is a master-eligible node and a data node, plus it can
|
||||
pre-process documents through ingest pipelines. This is very convenient for
|
||||
small clusters but, as the cluster grows, it becomes important to consider
|
||||
separating dedicated master-eligible nodes from dedicated data nodes.
|
||||
|
||||
[NOTE]
|
||||
[[coordinating-node]]
|
||||
.Coordinating node
|
||||
|
|
|
@ -1,120 +0,0 @@
|
|||
[[modules-tribe]]
|
||||
== Tribe node
|
||||
|
||||
deprecated[5.4.0,The `tribe` node is deprecated in favour of <<modules-cross-cluster-search>> and will be removed in Elasticsearch 7.0.]
|
||||
|
||||
The _tribes_ feature allows a _tribe node_ to act as a federated client across
|
||||
multiple clusters.
|
||||
|
||||
The tribe node works by retrieving the cluster state from all connected
|
||||
clusters and merging them into a global cluster state. With this information
|
||||
at hand, it is able to perform read and write operations against the nodes in
|
||||
all clusters as if they were local. Note that a tribe node needs to be able
|
||||
to connect to each single node in every configured cluster.
|
||||
|
||||
The `elasticsearch.yml` config file for a tribe node just needs to list the
|
||||
clusters that should be joined, for instance:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------
|
||||
tribe:
|
||||
t1: <1>
|
||||
cluster.name: cluster_one
|
||||
t2: <1>
|
||||
cluster.name: cluster_two
|
||||
--------------------------------
|
||||
<1> `t1` and `t2` are arbitrary names representing the connection to each
|
||||
cluster.
|
||||
|
||||
The example above configures connections to two clusters, named `t1` and `t2`
|
||||
respectively. The tribe node will create a <<modules-node,node client>> to
|
||||
connect each cluster using <<unicast,unicast discovery>> by default. Any
|
||||
other settings for the connection can be configured under `tribe.{name}`, just
|
||||
like the `cluster.name` in the example.
|
||||
|
||||
The merged global cluster state means that almost all operations work in the
|
||||
same way as a single cluster: distributed search, suggest, percolation,
|
||||
indexing, etc.
|
||||
|
||||
However, there are a few exceptions:
|
||||
|
||||
* The merged view cannot handle indices with the same name in multiple
|
||||
clusters. By default it will pick one of them, see later for on_conflict options.
|
||||
|
||||
* Master level read operations (eg <<cluster-state>>, <<cluster-health>>)
|
||||
will automatically execute with a local flag set to true since there is
|
||||
no master.
|
||||
|
||||
* Master level write operations (eg <<indices-create-index>>) are not
|
||||
allowed. These should be performed on a single cluster.
|
||||
|
||||
The tribe node can be configured to block all write operations and all
|
||||
metadata operations with:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------
|
||||
tribe:
|
||||
blocks:
|
||||
write: true
|
||||
metadata: true
|
||||
--------------------------------
|
||||
|
||||
The tribe node can also configure blocks on selected indices:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------
|
||||
tribe:
|
||||
blocks:
|
||||
write.indices: hk*,ldn*
|
||||
metadata.indices: hk*,ldn*
|
||||
--------------------------------
|
||||
|
||||
When there is a conflict and multiple clusters hold the same index, by default
|
||||
the tribe node will pick one of them. This can be configured using the `tribe.on_conflict`
|
||||
setting. It defaults to `any`, but can be set to `drop` (drop indices that have
|
||||
a conflict), or `prefer_[tribeName]` to prefer the index from a specific tribe.
|
||||
|
||||
[float]
|
||||
=== Tribe node settings
|
||||
|
||||
The tribe node starts a node client for each listed cluster. The following
|
||||
configuration options are passed down from the tribe node to each node client:
|
||||
|
||||
* `node.name` (used to derive the `node.name` for each node client)
|
||||
* `network.host`
|
||||
* `network.bind_host`
|
||||
* `network.publish_host`
|
||||
* `transport.host`
|
||||
* `transport.bind_host`
|
||||
* `transport.publish_host`
|
||||
* `path.home`
|
||||
* `path.logs`
|
||||
* `shield.*`
|
||||
|
||||
Almost any setting (except for `path.*`) may be configured at the node client
|
||||
level itself, in which case it will override any passed through setting from
|
||||
the tribe node. Settings you may want to set at the node client level
|
||||
include:
|
||||
|
||||
* `network.host`
|
||||
* `network.bind_host`
|
||||
* `network.publish_host`
|
||||
* `transport.host`
|
||||
* `transport.bind_host`
|
||||
* `transport.publish_host`
|
||||
* `cluster.name`
|
||||
* `discovery.zen.ping.unicast.hosts`
|
||||
|
||||
[source,yaml]
|
||||
------------------------
|
||||
network.host: 192.168.1.5 <1>
|
||||
|
||||
tribe:
|
||||
t1:
|
||||
cluster.name: cluster_one
|
||||
t2:
|
||||
cluster.name: cluster_two
|
||||
network.host: 10.1.2.3 <2>
|
||||
------------------------
|
||||
<1> The `network.host` setting is inherited by `t1`.
|
||||
<2> The `t2` node client overrides the setting inherited from the tribe node.
|
|
@ -15,6 +15,10 @@ GET /_search
|
|||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
NOTE: Highlighting `terms` queries is best-effort only, so terms of a `terms`
|
||||
query might not be highlighted depending on the highlighter implementation that
|
||||
is selected and on the number of terms in the `terms` query.
|
||||
|
||||
[float]
|
||||
[[query-dsl-terms-lookup]]
|
||||
===== Terms lookup mechanism
|
||||
|
|
|
@ -7,4 +7,5 @@ The changes listed below have been released for the first time in Elasticsearch
|
|||
[float]
|
||||
=== Breaking changes
|
||||
|
||||
No breaking changes have been made (yet)
|
||||
Core::
|
||||
* Tribe node has been removed in favor of Cross-Cluster-Search
|
|
@ -283,8 +283,10 @@ that shows potential errors of individual queries. The response has the followin
|
|||
}, [...]
|
||||
],
|
||||
"metric_details": { <6>
|
||||
"relevant_docs_retrieved": 6,
|
||||
"docs_retrieved": 10
|
||||
"precision" : {
|
||||
"relevant_docs_retrieved": 6,
|
||||
"docs_retrieved": 10
|
||||
}
|
||||
}
|
||||
},
|
||||
"my_query_id2" : { [...] }
|
||||
|
|
|
@ -86,6 +86,12 @@ And here is a sample response:
|
|||
aggregations and suggestions (no top hits returned).
|
||||
See <<shard-request-cache>>.
|
||||
|
||||
`allow_partial_search_results`::
|
||||
|
||||
Set to `false` to return an overall failure if the request would produce partial
|
||||
results. Defaults to true, which will allow partial results in the case of timeouts
|
||||
or partial failures.
|
||||
|
||||
`terminate_after`::
|
||||
|
||||
The maximum number of documents to collect for each shard,
|
||||
|
@ -103,9 +109,9 @@ And here is a sample response:
|
|||
|
||||
|
||||
|
||||
Out of the above, the `search_type` and the `request_cache` must be passed as
|
||||
query-string parameters. The rest of the search request should be passed
|
||||
within the body itself. The body content can also be passed as a REST
|
||||
Out of the above, the `search_type`, `request_cache` and the `allow_partial_search_results`
|
||||
settings must be passed as query-string parameters. The rest of the search request should
|
||||
be passed within the body itself. The body content can also be passed as a REST
|
||||
parameter named `source`.
|
||||
|
||||
Both HTTP GET and HTTP POST can be used to execute search with body. Since not
|
||||
|
|
|
@ -6,7 +6,7 @@ have matches in a different scope. In the parent/child case, parent documents ar
|
|||
documents or child documents are returned based on matches in parent documents. In the nested case, documents are returned
|
||||
based on matches in nested inner objects.
|
||||
|
||||
In both cases, the actual matches in the different scopes that caused a document to be returned is hidden. In many cases,
|
||||
In both cases, the actual matches in the different scopes that caused a document to be returned are hidden. In many cases,
|
||||
it's very useful to know which inner nested objects (in the case of nested) or children/parent documents (in the case
|
||||
of parent/child) caused certain information to be returned. The inner hits feature can be used for this. This feature
|
||||
returns per search hit in the search response additional nested hits that caused a search hit to match in a different scope.
|
||||
|
|
|
@ -122,4 +122,8 @@ Defaults to no terminate_after.
|
|||
Defaults to `query_then_fetch`. See
|
||||
<<search-request-search-type,_Search Type_>> for
|
||||
more details on the different types of search that can be performed.
|
||||
|
||||
|`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce
|
||||
partial results. Defaults to true, which will allow partial results in the case of timeouts
|
||||
or partial failures..
|
||||
|=======================================================================
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
distributionUrl=https\://services.gradle.org/distributions/gradle-4.5-all.zip
|
||||
distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-4.3-all.zip
|
||||
distributionSha256Sum=b3afcc2d5aaf4d23eeab2409d64c54046147322d05acc7fb5a63f84d8a2b8bd7
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
distributionSha256Sum=6ac2f8f9302f50241bf14cc5f4a3d88504ad20e61bb98c5fd048f7723b61397e
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
apply plugin: 'nebula.maven-scm'
|
||||
|
||||
archivesBaseName = 'elasticsearch-secure-sm'
|
||||
|
||||
publishing {
|
||||
publications {
|
||||
nebula {
|
||||
artifactId = archivesBaseName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
// do not add non-test compile dependencies to secure-sm without a good reason to do so
|
||||
|
||||
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
|
||||
testCompile "junit:junit:${versions.junit}"
|
||||
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
|
||||
|
||||
if (isEclipse == false || project.path == ":libs:secure-sm-tests") {
|
||||
testCompile("org.elasticsearch.test:framework:${version}") {
|
||||
exclude group: 'org.elasticsearch', module: 'secure-sm'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
forbiddenApisMain {
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
|
||||
if (isEclipse) {
|
||||
// in Eclipse the project is under a fake root so we need to change around the source sets
|
||||
sourceSets {
|
||||
if (project.path == ":libs:secure-sm") {
|
||||
main.java.srcDirs = ['java']
|
||||
main.resources.srcDirs = ['resources']
|
||||
} else {
|
||||
test.java.srcDirs = ['java']
|
||||
test.resources.srcDirs = ['resources']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// JAR hell is part of core which we do not want to add as a dependency
|
||||
jarHell.enabled = false
|
||||
|
||||
namingConventions {
|
||||
testClass = 'junit.framework.TestCase'
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests
|
||||
apply from: '../../build.gradle'
|
|
@ -0,0 +1,265 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.secure_sm;
|
||||
|
||||
import java.security.AccessController;
|
||||
import java.security.Permission;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Extension of SecurityManager that works around a few design flaws in Java Security.
|
||||
* <p>
|
||||
* There are a few major problems that require custom {@code SecurityManager} logic to fix:
|
||||
* <ul>
|
||||
* <li>{@code exitVM} permission is implicitly granted to all code by the default
|
||||
* Policy implementation. For a server app, this is not wanted. </li>
|
||||
* <li>ThreadGroups are not enforced by default, instead only system threads are
|
||||
* protected out of box by {@code modifyThread/modifyThreadGroup}. Applications
|
||||
* are encouraged to override the logic here to implement a stricter policy.
|
||||
* <li>System threads are not even really protected, because if the system uses
|
||||
* ThreadPools, {@code modifyThread} is abused by its {@code shutdown} checks. This means
|
||||
* a thread must have {@code modifyThread} to even terminate its own pool, leaving
|
||||
* system threads unprotected.
|
||||
* </ul>
|
||||
* This class throws exception on {@code exitVM} calls, and provides a whitelist where calls
|
||||
* from exit are allowed.
|
||||
* <p>
|
||||
* Additionally it enforces threadgroup security with the following rules:
|
||||
* <ul>
|
||||
* <li>{@code modifyThread} and {@code modifyThreadGroup} are required for any thread access
|
||||
* checks: with these permissions, access is granted as long as the thread group is
|
||||
* the same or an ancestor ({@code sourceGroup.parentOf(targetGroup) == true}).
|
||||
* <li>code without these permissions can do very little, except to interrupt itself. It may
|
||||
* not even create new threads.
|
||||
* <li>very special cases (like test runners) that have {@link ThreadPermission} can violate
|
||||
* threadgroup security rules.
|
||||
* </ul>
|
||||
* <p>
|
||||
* If java security debugging ({@code java.security.debug}) is enabled, and this SecurityManager
|
||||
* is installed, it will emit additional debugging information when threadgroup access checks fail.
|
||||
*
|
||||
* @see SecurityManager#checkAccess(Thread)
|
||||
* @see SecurityManager#checkAccess(ThreadGroup)
|
||||
* @see <a href="http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html">
|
||||
* http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html</a>
|
||||
*/
|
||||
public class SecureSM extends SecurityManager {
|
||||
|
||||
private final String[] classesThatCanExit;
|
||||
|
||||
/**
|
||||
* Creates a new security manager where no packages can exit nor halt the virtual machine.
|
||||
*/
|
||||
public SecureSM() {
|
||||
this(new String[0]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new security manager with the specified list of regular expressions as the those that class names will be tested against to
|
||||
* check whether or not a class can exit or halt the virtual machine.
|
||||
*
|
||||
* @param classesThatCanExit the list of classes that can exit or halt the virtual machine
|
||||
*/
|
||||
public SecureSM(final String[] classesThatCanExit) {
|
||||
this.classesThatCanExit = classesThatCanExit;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new security manager with a standard set of test packages being the only packages that can exit or halt the virtual
|
||||
* machine. The packages that can exit are:
|
||||
* <ul>
|
||||
* <li><code>org.apache.maven.surefire.booter.</code></li>
|
||||
* <li><code>com.carrotsearch.ant.tasks.junit4.</code></li>
|
||||
* <li><code>org.eclipse.internal.junit.runner.</code></li>
|
||||
* <li><code>com.intellij.rt.execution.junit.</code></li>
|
||||
* </ul>
|
||||
*
|
||||
* @return an instance of SecureSM where test packages can halt or exit the virtual machine
|
||||
*/
|
||||
public static SecureSM createTestSecureSM() {
|
||||
return new SecureSM(TEST_RUNNER_PACKAGES);
|
||||
}
|
||||
|
||||
static final String[] TEST_RUNNER_PACKAGES = new String[] {
|
||||
// surefire test runner
|
||||
"org\\.apache\\.maven\\.surefire\\.booter\\..*",
|
||||
// junit4 test runner
|
||||
"com\\.carrotsearch\\.ant\\.tasks\\.junit4\\.slave\\..*",
|
||||
// eclipse test runner
|
||||
"org\\.eclipse.jdt\\.internal\\.junit\\.runner\\..*",
|
||||
// intellij test runner
|
||||
"com\\.intellij\\.rt\\.execution\\.junit\\..*"
|
||||
};
|
||||
|
||||
// java.security.debug support
|
||||
private static final boolean DEBUG = AccessController.doPrivileged(new PrivilegedAction<Boolean>() {
|
||||
@Override
|
||||
public Boolean run() {
|
||||
try {
|
||||
String v = System.getProperty("java.security.debug");
|
||||
// simple check that they are trying to debug
|
||||
return v != null && v.length() > 0;
|
||||
} catch (SecurityException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@Override
|
||||
@SuppressForbidden(reason = "java.security.debug messages go to standard error")
|
||||
public void checkAccess(Thread t) {
|
||||
try {
|
||||
checkThreadAccess(t);
|
||||
} catch (SecurityException e) {
|
||||
if (DEBUG) {
|
||||
System.err.println("access: caller thread=" + Thread.currentThread());
|
||||
System.err.println("access: target thread=" + t);
|
||||
debugThreadGroups(Thread.currentThread().getThreadGroup(), t.getThreadGroup());
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressForbidden(reason = "java.security.debug messages go to standard error")
|
||||
public void checkAccess(ThreadGroup g) {
|
||||
try {
|
||||
checkThreadGroupAccess(g);
|
||||
} catch (SecurityException e) {
|
||||
if (DEBUG) {
|
||||
System.err.println("access: caller thread=" + Thread.currentThread());
|
||||
debugThreadGroups(Thread.currentThread().getThreadGroup(), g);
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "java.security.debug messages go to standard error")
|
||||
private void debugThreadGroups(final ThreadGroup caller, final ThreadGroup target) {
|
||||
System.err.println("access: caller group=" + caller);
|
||||
System.err.println("access: target group=" + target);
|
||||
}
|
||||
|
||||
// thread permission logic
|
||||
|
||||
private static final Permission MODIFY_THREAD_PERMISSION = new RuntimePermission("modifyThread");
|
||||
private static final Permission MODIFY_ARBITRARY_THREAD_PERMISSION = new ThreadPermission("modifyArbitraryThread");
|
||||
|
||||
protected void checkThreadAccess(Thread t) {
|
||||
Objects.requireNonNull(t);
|
||||
|
||||
// first, check if we can modify threads at all.
|
||||
checkPermission(MODIFY_THREAD_PERMISSION);
|
||||
|
||||
// check the threadgroup, if its our thread group or an ancestor, its fine.
|
||||
final ThreadGroup source = Thread.currentThread().getThreadGroup();
|
||||
final ThreadGroup target = t.getThreadGroup();
|
||||
|
||||
if (target == null) {
|
||||
return; // its a dead thread, do nothing.
|
||||
} else if (source.parentOf(target) == false) {
|
||||
checkPermission(MODIFY_ARBITRARY_THREAD_PERMISSION);
|
||||
}
|
||||
}
|
||||
|
||||
private static final Permission MODIFY_THREADGROUP_PERMISSION = new RuntimePermission("modifyThreadGroup");
|
||||
private static final Permission MODIFY_ARBITRARY_THREADGROUP_PERMISSION = new ThreadPermission("modifyArbitraryThreadGroup");
|
||||
|
||||
protected void checkThreadGroupAccess(ThreadGroup g) {
|
||||
Objects.requireNonNull(g);
|
||||
|
||||
// first, check if we can modify thread groups at all.
|
||||
checkPermission(MODIFY_THREADGROUP_PERMISSION);
|
||||
|
||||
// check the threadgroup, if its our thread group or an ancestor, its fine.
|
||||
final ThreadGroup source = Thread.currentThread().getThreadGroup();
|
||||
final ThreadGroup target = g;
|
||||
|
||||
if (source == null) {
|
||||
return; // we are a dead thread, do nothing
|
||||
} else if (source.parentOf(target) == false) {
|
||||
checkPermission(MODIFY_ARBITRARY_THREADGROUP_PERMISSION);
|
||||
}
|
||||
}
|
||||
|
||||
// exit permission logic
|
||||
@Override
|
||||
public void checkExit(int status) {
|
||||
innerCheckExit(status);
|
||||
}
|
||||
|
||||
/**
|
||||
* The "Uwe Schindler" algorithm.
|
||||
*
|
||||
* @param status the exit status
|
||||
*/
|
||||
protected void innerCheckExit(final int status) {
|
||||
AccessController.doPrivileged(new PrivilegedAction<Void>() {
|
||||
@Override
|
||||
public Void run() {
|
||||
final String systemClassName = System.class.getName(),
|
||||
runtimeClassName = Runtime.class.getName();
|
||||
String exitMethodHit = null;
|
||||
for (final StackTraceElement se : Thread.currentThread().getStackTrace()) {
|
||||
final String className = se.getClassName(), methodName = se.getMethodName();
|
||||
if (
|
||||
("exit".equals(methodName) || "halt".equals(methodName)) &&
|
||||
(systemClassName.equals(className) || runtimeClassName.equals(className))
|
||||
) {
|
||||
exitMethodHit = className + '#' + methodName + '(' + status + ')';
|
||||
continue;
|
||||
}
|
||||
|
||||
if (exitMethodHit != null) {
|
||||
if (classesThatCanExit == null) {
|
||||
break;
|
||||
}
|
||||
if (classCanExit(className, classesThatCanExit)) {
|
||||
// this exit point is allowed, we return normally from closure:
|
||||
return null;
|
||||
}
|
||||
// anything else in stack trace is not allowed, break and throw SecurityException below:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (exitMethodHit == null) {
|
||||
// should never happen, only if JVM hides stack trace - replace by generic:
|
||||
exitMethodHit = "JVM exit method";
|
||||
}
|
||||
throw new SecurityException(exitMethodHit + " calls are not allowed");
|
||||
}
|
||||
});
|
||||
|
||||
// we passed the stack check, delegate to super, so default policy can still deny permission:
|
||||
super.checkExit(status);
|
||||
}
|
||||
|
||||
static boolean classCanExit(final String className, final String[] classesThatCanExit) {
|
||||
for (final String classThatCanExit : classesThatCanExit) {
|
||||
if (className.matches(classThatCanExit)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.secure_sm;
|
||||
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.RetentionPolicy;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
/**
|
||||
* Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field.
|
||||
*/
|
||||
@Retention(RetentionPolicy.CLASS)
|
||||
@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE })
|
||||
@interface SuppressForbidden {
|
||||
String reason();
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.secure_sm;
|
||||
|
||||
import java.security.BasicPermission;
|
||||
|
||||
/**
|
||||
* Permission to modify threads or thread groups normally not accessible
|
||||
* to the current thread.
|
||||
* <p>
|
||||
* {@link SecureSM} enforces ThreadGroup security: threads with
|
||||
* {@code RuntimePermission("modifyThread")} or {@code RuntimePermission("modifyThreadGroup")}
|
||||
* are only allowed to modify their current thread group or an ancestor of that group.
|
||||
* <p>
|
||||
* In some cases (e.g. test runners), code needs to manipulate arbitrary threads,
|
||||
* so this Permission provides for that: the targets {@code modifyArbitraryThread} and
|
||||
* {@code modifyArbitraryThreadGroup} allow a thread blanket access to any group.
|
||||
*
|
||||
* @see ThreadGroup
|
||||
* @see SecureSM
|
||||
*/
|
||||
public final class ThreadPermission extends BasicPermission {
|
||||
|
||||
/**
|
||||
* Creates a new ThreadPermission object.
|
||||
*
|
||||
* @param name target name
|
||||
*/
|
||||
public ThreadPermission(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new ThreadPermission object.
|
||||
* This constructor exists for use by the {@code Policy} object to instantiate new Permission objects.
|
||||
*
|
||||
* @param name target name
|
||||
* @param actions ignored
|
||||
*/
|
||||
public ThreadPermission(String name, String actions) {
|
||||
super(name, actions);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
|
||||
// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests
|
||||
apply from: '../../build.gradle'
|
||||
|
||||
dependencies {
|
||||
testCompile project(':libs:secure-sm')
|
||||
}
|
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.secure_sm;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import java.security.Permission;
|
||||
import java.security.Policy;
|
||||
import java.security.ProtectionDomain;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/** Simple tests for SecureSM */
|
||||
public class SecureSMTests extends TestCase {
|
||||
static {
|
||||
// install a mock security policy:
|
||||
// AllPermission to source code
|
||||
// ThreadPermission not granted anywhere else
|
||||
final ProtectionDomain sourceCode = SecureSM.class.getProtectionDomain();
|
||||
Policy.setPolicy(new Policy() {
|
||||
@Override
|
||||
public boolean implies(ProtectionDomain domain, Permission permission) {
|
||||
if (domain == sourceCode) {
|
||||
return true;
|
||||
} else if (permission instanceof ThreadPermission) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
});
|
||||
System.setSecurityManager(SecureSM.createTestSecureSM());
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "testing that System#exit is blocked")
|
||||
public void testTryToExit() {
|
||||
try {
|
||||
System.exit(1);
|
||||
fail("did not hit expected exception");
|
||||
} catch (SecurityException expected) {}
|
||||
}
|
||||
|
||||
public void testClassCanExit() {
|
||||
assertTrue(SecureSM.classCanExit("org.apache.maven.surefire.booter.CommandReader", SecureSM.TEST_RUNNER_PACKAGES));
|
||||
assertTrue(SecureSM.classCanExit("com.carrotsearch.ant.tasks.junit4.slave.JvmExit", SecureSM.TEST_RUNNER_PACKAGES));
|
||||
assertTrue(SecureSM.classCanExit("org.eclipse.jdt.internal.junit.runner.RemoteTestRunner", SecureSM.TEST_RUNNER_PACKAGES));
|
||||
assertTrue(SecureSM.classCanExit("com.intellij.rt.execution.junit.JUnitStarter", SecureSM.TEST_RUNNER_PACKAGES));
|
||||
assertTrue(SecureSM.classCanExit("org.elasticsearch.Foo", new String[]{"org.elasticsearch.Foo"}));
|
||||
assertFalse(SecureSM.classCanExit("org.elasticsearch.Foo", new String[]{"org.elasticsearch.Bar"}));
|
||||
}
|
||||
|
||||
public void testCreateThread() throws Exception {
|
||||
Thread t = new Thread();
|
||||
t.start();
|
||||
t.join();
|
||||
// no exception
|
||||
}
|
||||
|
||||
public void testCreateThreadGroup() throws Exception {
|
||||
Thread t = new Thread(new ThreadGroup("childgroup"), "child");
|
||||
t.start();
|
||||
t.join();
|
||||
// no exception
|
||||
}
|
||||
|
||||
public void testModifyChild() throws Exception {
|
||||
final AtomicBoolean interrupted = new AtomicBoolean(false);
|
||||
Thread t = new Thread(new ThreadGroup("childgroup"), "child") {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
Thread.sleep(Long.MAX_VALUE);
|
||||
} catch (InterruptedException expected) {
|
||||
interrupted.set(true);
|
||||
}
|
||||
}
|
||||
};
|
||||
t.start();
|
||||
t.interrupt();
|
||||
t.join();
|
||||
// no exception
|
||||
assertTrue(interrupted.get());
|
||||
}
|
||||
|
||||
public void testNoModifySibling() throws Exception {
|
||||
final AtomicBoolean interrupted1 = new AtomicBoolean(false);
|
||||
final AtomicBoolean interrupted2 = new AtomicBoolean(false);
|
||||
|
||||
final Thread t1 = new Thread(new ThreadGroup("childgroup"), "child") {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
Thread.sleep(Long.MAX_VALUE);
|
||||
} catch (InterruptedException expected) {
|
||||
interrupted1.set(true);
|
||||
}
|
||||
}
|
||||
};
|
||||
t1.start();
|
||||
|
||||
Thread t2 = new Thread(new ThreadGroup("anothergroup"), "another child") {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
Thread.sleep(Long.MAX_VALUE);
|
||||
} catch (InterruptedException expected) {
|
||||
interrupted2.set(true);
|
||||
try {
|
||||
t1.interrupt(); // try to bogusly interrupt our sibling
|
||||
fail("did not hit expected exception");
|
||||
} catch (SecurityException expected2) {}
|
||||
}
|
||||
}
|
||||
};
|
||||
t2.start();
|
||||
t2.interrupt();
|
||||
t2.join();
|
||||
// sibling attempted to but was not able to muck with its other sibling
|
||||
assertTrue(interrupted2.get());
|
||||
assertFalse(interrupted1.get());
|
||||
// but we are the parent and can terminate
|
||||
t1.interrupt();
|
||||
t1.join();
|
||||
assertTrue(interrupted1.get());
|
||||
}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.secure_sm;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import java.security.AllPermission;
|
||||
|
||||
/**
|
||||
* Simple tests for ThreadPermission
|
||||
*/
|
||||
public class ThreadPermissionTests extends TestCase {
|
||||
|
||||
public void testEquals() {
|
||||
assertEquals(new ThreadPermission("modifyArbitraryThread"), new ThreadPermission("modifyArbitraryThread"));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThread").equals(new AllPermission()));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThread").equals(new ThreadPermission("modifyArbitraryThreadGroup"))); }
|
||||
|
||||
public void testImplies() {
|
||||
assertTrue(new ThreadPermission("modifyArbitraryThread").implies(new ThreadPermission("modifyArbitraryThread")));
|
||||
assertTrue(new ThreadPermission("modifyArbitraryThreadGroup").implies(new ThreadPermission("modifyArbitraryThreadGroup")));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThread").implies(new ThreadPermission("modifyArbitraryThreadGroup")));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThreadGroup").implies(new ThreadPermission("modifyArbitraryThread")));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThread").implies(new AllPermission()));
|
||||
assertFalse(new ThreadPermission("modifyArbitraryThreadGroup").implies(new AllPermission()));
|
||||
assertTrue(new ThreadPermission("*").implies(new ThreadPermission("modifyArbitraryThread")));
|
||||
assertTrue(new ThreadPermission("*").implies(new ThreadPermission("modifyArbitraryThreadGroup")));
|
||||
assertFalse(new ThreadPermission("*").implies(new AllPermission()));
|
||||
}
|
||||
}
|
|
@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.MultiValueMode;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregationBuilder;
|
||||
|
@ -46,6 +47,17 @@ public class MatrixStatsAggregationBuilder
|
|||
super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC);
|
||||
}
|
||||
|
||||
protected MatrixStatsAggregationBuilder(MatrixStatsAggregationBuilder clone,
|
||||
AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
super(clone, factoriesBuilder, metaData);
|
||||
this.multiValueMode = clone.multiValueMode;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
return new MatrixStatsAggregationBuilder(this, factoriesBuilder, metaData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream.
|
||||
*/
|
||||
|
|
|
@ -39,7 +39,7 @@ public class MatrixStatsParser extends NumericValuesSourceParser {
|
|||
@Override
|
||||
protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser,
|
||||
Map<ParseField, Object> otherOptions) throws IOException {
|
||||
if (MULTIVALUE_MODE_FIELD.match(currentFieldName)) {
|
||||
if (MULTIVALUE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
otherOptions.put(MULTIVALUE_MODE_FIELD, parser.text());
|
||||
return true;
|
||||
|
|
|
@ -56,6 +56,14 @@ public abstract class MultiValuesSourceAggregationBuilder<VS extends ValuesSourc
|
|||
super(name, valuesSourceType, targetValueType);
|
||||
}
|
||||
|
||||
protected LeafOnly(LeafOnly<VS, AB> clone, Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
super(clone, factoriesBuilder, metaData);
|
||||
if (factoriesBuilder.count() > 0) {
|
||||
throw new AggregationInitializationException("Aggregator [" + name + "] of type ["
|
||||
+ getType() + "] cannot accept sub-aggregations");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read from a stream that does not serialize its targetValueType. This should be used by most subclasses.
|
||||
*/
|
||||
|
@ -95,6 +103,18 @@ public abstract class MultiValuesSourceAggregationBuilder<VS extends ValuesSourc
|
|||
this.targetValueType = targetValueType;
|
||||
}
|
||||
|
||||
protected MultiValuesSourceAggregationBuilder(MultiValuesSourceAggregationBuilder<VS, AB> clone,
|
||||
Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
super(clone, factoriesBuilder, metaData);
|
||||
this.valuesSourceType = clone.valuesSourceType;
|
||||
this.targetValueType = clone.targetValueType;
|
||||
this.fields = new ArrayList<>(clone.fields);
|
||||
this.valueType = clone.valueType;
|
||||
this.format = clone.format;
|
||||
this.missingMap = new HashMap<>(clone.missingMap);
|
||||
this.missing = clone.missing;
|
||||
}
|
||||
|
||||
protected MultiValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType)
|
||||
throws IOException {
|
||||
super(in);
|
||||
|
|
|
@ -88,11 +88,11 @@ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implement
|
|||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.VALUE_STRING) {
|
||||
if (CommonFields.FIELDS.match(currentFieldName)) {
|
||||
if (CommonFields.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
fields = Collections.singletonList(parser.text());
|
||||
} else if (formattable && CommonFields.FORMAT.match(currentFieldName)) {
|
||||
} else if (formattable && CommonFields.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
format = parser.text();
|
||||
} else if (CommonFields.VALUE_TYPE.match(currentFieldName)) {
|
||||
} else if (CommonFields.VALUE_TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
throw new ParsingException(parser.getTokenLocation(),
|
||||
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " +
|
||||
"Multi-field aggregations do not support scripts.");
|
||||
|
@ -101,12 +101,12 @@ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implement
|
|||
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (CommonFields.MISSING.match(currentFieldName)) {
|
||||
if (CommonFields.MISSING.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
missingMap = new HashMap<>();
|
||||
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
|
||||
parseMissingAndAdd(aggregationName, currentFieldName, parser, missingMap);
|
||||
}
|
||||
} else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) {
|
||||
} else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
throw new ParsingException(parser.getTokenLocation(),
|
||||
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " +
|
||||
"Multi-field aggregations do not support scripts.");
|
||||
|
@ -116,11 +116,11 @@ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implement
|
|||
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "].");
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) {
|
||||
if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
throw new ParsingException(parser.getTokenLocation(),
|
||||
"Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " +
|
||||
"Multi-field aggregations do not support scripts.");
|
||||
} else if (CommonFields.FIELDS.match(currentFieldName)) {
|
||||
} else if (CommonFields.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
fields = new ArrayList<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
|
|
|
@ -23,6 +23,8 @@ import com.fasterxml.jackson.core.JsonFactory;
|
|||
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContentParser;
|
||||
import org.elasticsearch.ingest.AbstractProcessor;
|
||||
|
@ -95,8 +97,7 @@ public final class ScriptProcessor extends AbstractProcessor {
|
|||
public ScriptProcessor create(Map<String, Processor.Factory> registry, String processorTag,
|
||||
Map<String, Object> config) throws Exception {
|
||||
XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config);
|
||||
JsonXContentParser parser = new JsonXContentParser(NamedXContentRegistry.EMPTY,
|
||||
JSON_FACTORY.createParser(builder.bytes().streamInput()));
|
||||
XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, builder.bytes().streamInput());
|
||||
Script script = Script.parse(parser);
|
||||
|
||||
Arrays.asList("id", "source", "inline", "lang", "params", "options").forEach(config::remove);
|
||||
|
|
|
@ -78,9 +78,11 @@
|
|||
id: 4
|
||||
body: { "theField": "foo 4" }
|
||||
|
||||
# we use a different index here since we compare the explain description which contains a doc ID and we can only be sure that it's 0
|
||||
# if we are the only doc in the shard.
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
index: otherindex
|
||||
type: type
|
||||
id: 5
|
||||
body: { "otherField": "foo" }
|
||||
|
@ -113,7 +115,7 @@
|
|||
|
||||
- match: { hits.total: 1 }
|
||||
- length: { hits.hits: 1 }
|
||||
- match: { hits.hits.0._explanation.description: "weight(otherField:foo in 1) [PerFieldSimilarity], result of:" }
|
||||
- match: { hits.hits.0._explanation.description: "weight(otherField:foo in 0) [PerFieldSimilarity], result of:" }
|
||||
|
||||
- do:
|
||||
search_template:
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
package org.elasticsearch.painless;
|
||||
|
||||
import org.elasticsearch.painless.Definition.Method;
|
||||
import org.elasticsearch.painless.Definition.RuntimeClass;
|
||||
import org.elasticsearch.painless.Definition.Struct;
|
||||
|
||||
import java.lang.invoke.CallSite;
|
||||
import java.lang.invoke.MethodHandle;
|
||||
|
@ -185,7 +185,7 @@ public final class Def {
|
|||
Definition.MethodKey key = new Definition.MethodKey(name, arity);
|
||||
// check whitelist for matching method
|
||||
for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
|
||||
RuntimeClass struct = definition.getRuntimeClass(clazz);
|
||||
Struct struct = definition.RuntimeClassToStruct(clazz);
|
||||
|
||||
if (struct != null) {
|
||||
Method method = struct.methods.get(key);
|
||||
|
@ -195,7 +195,7 @@ public final class Def {
|
|||
}
|
||||
|
||||
for (Class<?> iface : clazz.getInterfaces()) {
|
||||
struct = definition.getRuntimeClass(iface);
|
||||
struct = definition.RuntimeClassToStruct(iface);
|
||||
|
||||
if (struct != null) {
|
||||
Method method = struct.methods.get(key);
|
||||
|
@ -279,7 +279,7 @@ public final class Def {
|
|||
captures[capture] = callSiteType.parameterType(i + 1 + capture);
|
||||
}
|
||||
MethodHandle filter;
|
||||
Definition.Type interfaceType = method.arguments.get(i - 1 - replaced);
|
||||
Definition.Type interfaceType = definition.ClassToType(method.arguments.get(i - 1 - replaced));
|
||||
if (signature.charAt(0) == 'S') {
|
||||
// the implementation is strongly typed, now that we know the interface type,
|
||||
// we have everything.
|
||||
|
@ -325,7 +325,7 @@ public final class Def {
|
|||
static MethodHandle lookupReference(Definition definition, Lookup lookup, String interfaceClass,
|
||||
Class<?> receiverClass, String name) throws Throwable {
|
||||
Definition.Type interfaceType = definition.getType(interfaceClass);
|
||||
Method interfaceMethod = interfaceType.struct.getFunctionalMethod();
|
||||
Method interfaceMethod = interfaceType.struct.functionalMethod;
|
||||
if (interfaceMethod == null) {
|
||||
throw new IllegalArgumentException("Class [" + interfaceClass + "] is not a functional interface");
|
||||
}
|
||||
|
@ -342,7 +342,7 @@ public final class Def {
|
|||
final FunctionRef ref;
|
||||
if ("this".equals(type)) {
|
||||
// user written method
|
||||
Method interfaceMethod = clazz.struct.getFunctionalMethod();
|
||||
Method interfaceMethod = clazz.struct.functionalMethod;
|
||||
if (interfaceMethod == null) {
|
||||
throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
|
||||
"to [" + clazz.name + "], not a functional interface");
|
||||
|
@ -363,10 +363,10 @@ public final class Def {
|
|||
}
|
||||
throw new IllegalArgumentException("Unknown call [" + call + "] with [" + arity + "] arguments.");
|
||||
}
|
||||
ref = new FunctionRef(clazz, interfaceMethod, call, handle.type(), captures.length);
|
||||
ref = new FunctionRef(clazz.clazz, interfaceMethod, call, handle.type(), captures.length);
|
||||
} else {
|
||||
// whitelist lookup
|
||||
ref = new FunctionRef(definition, clazz, type, call, captures.length);
|
||||
ref = new FunctionRef(definition, clazz.clazz, type, call, captures.length);
|
||||
}
|
||||
final CallSite callSite = LambdaBootstrap.lambdaBootstrap(
|
||||
lookup,
|
||||
|
@ -415,7 +415,7 @@ public final class Def {
|
|||
static MethodHandle lookupGetter(Definition definition, Class<?> receiverClass, String name) {
|
||||
// first try whitelist
|
||||
for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
|
||||
RuntimeClass struct = definition.getRuntimeClass(clazz);
|
||||
Struct struct = definition.RuntimeClassToStruct(clazz);
|
||||
|
||||
if (struct != null) {
|
||||
MethodHandle handle = struct.getters.get(name);
|
||||
|
@ -425,7 +425,7 @@ public final class Def {
|
|||
}
|
||||
|
||||
for (final Class<?> iface : clazz.getInterfaces()) {
|
||||
struct = definition.getRuntimeClass(iface);
|
||||
struct = definition.RuntimeClassToStruct(iface);
|
||||
|
||||
if (struct != null) {
|
||||
MethodHandle handle = struct.getters.get(name);
|
||||
|
@ -486,7 +486,7 @@ public final class Def {
|
|||
static MethodHandle lookupSetter(Definition definition, Class<?> receiverClass, String name) {
|
||||
// first try whitelist
|
||||
for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
|
||||
RuntimeClass struct = definition.getRuntimeClass(clazz);
|
||||
Struct struct = definition.RuntimeClassToStruct(clazz);
|
||||
|
||||
if (struct != null) {
|
||||
MethodHandle handle = struct.setters.get(name);
|
||||
|
@ -496,7 +496,7 @@ public final class Def {
|
|||
}
|
||||
|
||||
for (final Class<?> iface : clazz.getInterfaces()) {
|
||||
struct = definition.getRuntimeClass(iface);
|
||||
struct = definition.RuntimeClassToStruct(iface);
|
||||
|
||||
if (struct != null) {
|
||||
MethodHandle handle = struct.setters.get(name);
|
||||
|
|
|
@ -135,13 +135,13 @@ public final class Definition {
|
|||
public final String name;
|
||||
public final Struct owner;
|
||||
public final Class<?> augmentation;
|
||||
public final Type rtn;
|
||||
public final List<Type> arguments;
|
||||
public final Class<?> rtn;
|
||||
public final List<Class<?>> arguments;
|
||||
public final org.objectweb.asm.commons.Method method;
|
||||
public final int modifiers;
|
||||
public final MethodHandle handle;
|
||||
|
||||
public Method(String name, Struct owner, Class<?> augmentation, Type rtn, List<Type> arguments,
|
||||
public Method(String name, Struct owner, Class<?> augmentation, Class<?> rtn, List<Class<?>> arguments,
|
||||
org.objectweb.asm.commons.Method method, int modifiers, MethodHandle handle) {
|
||||
this.name = name;
|
||||
this.augmentation = augmentation;
|
||||
|
@ -172,21 +172,21 @@ public final class Definition {
|
|||
params = new Class<?>[1 + arguments.size()];
|
||||
params[0] = augmentation;
|
||||
for (int i = 0; i < arguments.size(); i++) {
|
||||
params[i + 1] = arguments.get(i).clazz;
|
||||
params[i + 1] = defClassToObjectClass(arguments.get(i));
|
||||
}
|
||||
returnValue = rtn.clazz;
|
||||
returnValue = defClassToObjectClass(rtn);
|
||||
} else if (Modifier.isStatic(modifiers)) {
|
||||
// static method: straightforward copy
|
||||
params = new Class<?>[arguments.size()];
|
||||
for (int i = 0; i < arguments.size(); i++) {
|
||||
params[i] = arguments.get(i).clazz;
|
||||
params[i] = defClassToObjectClass(arguments.get(i));
|
||||
}
|
||||
returnValue = rtn.clazz;
|
||||
returnValue = defClassToObjectClass(rtn);
|
||||
} else if ("<init>".equals(name)) {
|
||||
// constructor: returns the owner class
|
||||
params = new Class<?>[arguments.size()];
|
||||
for (int i = 0; i < arguments.size(); i++) {
|
||||
params[i] = arguments.get(i).clazz;
|
||||
params[i] = defClassToObjectClass(arguments.get(i));
|
||||
}
|
||||
returnValue = owner.clazz;
|
||||
} else {
|
||||
|
@ -194,9 +194,9 @@ public final class Definition {
|
|||
params = new Class<?>[1 + arguments.size()];
|
||||
params[0] = owner.clazz;
|
||||
for (int i = 0; i < arguments.size(); i++) {
|
||||
params[i + 1] = arguments.get(i).clazz;
|
||||
params[i + 1] = defClassToObjectClass(arguments.get(i));
|
||||
}
|
||||
returnValue = rtn.clazz;
|
||||
returnValue = defClassToObjectClass(rtn);
|
||||
}
|
||||
return MethodType.methodType(returnValue, params);
|
||||
}
|
||||
|
@ -223,17 +223,17 @@ public final class Definition {
|
|||
public static final class Field {
|
||||
public final String name;
|
||||
public final Struct owner;
|
||||
public final Type type;
|
||||
public final Class<?> clazz;
|
||||
public final String javaName;
|
||||
public final int modifiers;
|
||||
private final MethodHandle getter;
|
||||
private final MethodHandle setter;
|
||||
|
||||
private Field(String name, String javaName, Struct owner, Type type, int modifiers, MethodHandle getter, MethodHandle setter) {
|
||||
private Field(String name, String javaName, Struct owner, Class<?> clazz, int modifiers, MethodHandle getter, MethodHandle setter) {
|
||||
this.name = name;
|
||||
this.javaName = javaName;
|
||||
this.owner = owner;
|
||||
this.type = type;
|
||||
this.clazz = clazz;
|
||||
this.modifiers = modifiers;
|
||||
this.getter = getter;
|
||||
this.setter = setter;
|
||||
|
@ -307,9 +307,12 @@ public final class Definition {
|
|||
public final Map<String, Field> staticMembers;
|
||||
public final Map<String, Field> members;
|
||||
|
||||
private final SetOnce<Method> functionalMethod;
|
||||
public final Map<String, MethodHandle> getters;
|
||||
public final Map<String, MethodHandle> setters;
|
||||
|
||||
private Struct(final String name, final Class<?> clazz, final org.objectweb.asm.Type type) {
|
||||
public final Method functionalMethod;
|
||||
|
||||
private Struct(String name, Class<?> clazz, org.objectweb.asm.Type type) {
|
||||
this.name = name;
|
||||
this.clazz = clazz;
|
||||
this.type = type;
|
||||
|
@ -321,10 +324,13 @@ public final class Definition {
|
|||
staticMembers = new HashMap<>();
|
||||
members = new HashMap<>();
|
||||
|
||||
functionalMethod = new SetOnce<>();
|
||||
getters = new HashMap<>();
|
||||
setters = new HashMap<>();
|
||||
|
||||
functionalMethod = null;
|
||||
}
|
||||
|
||||
private Struct(final Struct struct) {
|
||||
private Struct(Struct struct, Method functionalMethod) {
|
||||
name = struct.name;
|
||||
clazz = struct.clazz;
|
||||
type = struct.type;
|
||||
|
@ -336,11 +342,14 @@ public final class Definition {
|
|||
staticMembers = Collections.unmodifiableMap(struct.staticMembers);
|
||||
members = Collections.unmodifiableMap(struct.members);
|
||||
|
||||
functionalMethod = struct.functionalMethod;
|
||||
getters = Collections.unmodifiableMap(struct.getters);
|
||||
setters = Collections.unmodifiableMap(struct.setters);
|
||||
|
||||
this.functionalMethod = functionalMethod;
|
||||
}
|
||||
|
||||
private Struct freeze() {
|
||||
return new Struct(this);
|
||||
private Struct freeze(Method functionalMethod) {
|
||||
return new Struct(this, functionalMethod);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -362,14 +371,6 @@ public final class Definition {
|
|||
public int hashCode() {
|
||||
return name.hashCode();
|
||||
}
|
||||
|
||||
/**
|
||||
* If this class is a functional interface according to JLS, returns its method.
|
||||
* Otherwise returns null.
|
||||
*/
|
||||
public Method getFunctionalMethod() {
|
||||
return functionalMethod.get();
|
||||
}
|
||||
}
|
||||
|
||||
public static class Cast {
|
||||
|
@ -418,25 +419,6 @@ public final class Definition {
|
|||
}
|
||||
}
|
||||
|
||||
public static final class RuntimeClass {
|
||||
private final Struct struct;
|
||||
public final Map<MethodKey, Method> methods;
|
||||
public final Map<String, MethodHandle> getters;
|
||||
public final Map<String, MethodHandle> setters;
|
||||
|
||||
private RuntimeClass(final Struct struct, final Map<MethodKey, Method> methods,
|
||||
final Map<String, MethodHandle> getters, final Map<String, MethodHandle> setters) {
|
||||
this.struct = struct;
|
||||
this.methods = Collections.unmodifiableMap(methods);
|
||||
this.getters = Collections.unmodifiableMap(getters);
|
||||
this.setters = Collections.unmodifiableMap(setters);
|
||||
}
|
||||
|
||||
public Struct getStruct() {
|
||||
return struct;
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns whether or not a non-array type exists. */
|
||||
public boolean isSimpleType(final String name) {
|
||||
return structsMap.containsKey(name);
|
||||
|
@ -452,60 +434,60 @@ public final class Definition {
|
|||
return getTypeInternal(struct, dimensions);
|
||||
}
|
||||
|
||||
public Type getBoxedType(Type unboxed) {
|
||||
if (unboxed.clazz == boolean.class) {
|
||||
return BooleanType;
|
||||
} else if (unboxed.clazz == byte.class) {
|
||||
return ByteType;
|
||||
} else if (unboxed.clazz == short.class) {
|
||||
return ShortType;
|
||||
} else if (unboxed.clazz == char.class) {
|
||||
return CharacterType;
|
||||
} else if (unboxed.clazz == int.class) {
|
||||
return IntegerType;
|
||||
} else if (unboxed.clazz == long.class) {
|
||||
return LongType;
|
||||
} else if (unboxed.clazz == float.class) {
|
||||
return FloatType;
|
||||
} else if (unboxed.clazz == double.class) {
|
||||
return DoubleType;
|
||||
public static Class<?> getBoxedType(Class<?> clazz) {
|
||||
if (clazz == boolean.class) {
|
||||
return Boolean.class;
|
||||
} else if (clazz == byte.class) {
|
||||
return Byte.class;
|
||||
} else if (clazz == short.class) {
|
||||
return Short.class;
|
||||
} else if (clazz == char.class) {
|
||||
return Character.class;
|
||||
} else if (clazz == int.class) {
|
||||
return Integer.class;
|
||||
} else if (clazz == long.class) {
|
||||
return Long.class;
|
||||
} else if (clazz == float.class) {
|
||||
return Float.class;
|
||||
} else if (clazz == double.class) {
|
||||
return Double.class;
|
||||
}
|
||||
|
||||
return unboxed;
|
||||
return clazz;
|
||||
}
|
||||
|
||||
public Type getUnboxedType(Type boxed) {
|
||||
if (boxed.clazz == Boolean.class) {
|
||||
return booleanType;
|
||||
} else if (boxed.clazz == Byte.class) {
|
||||
return byteType;
|
||||
} else if (boxed.clazz == Short.class) {
|
||||
return shortType;
|
||||
} else if (boxed.clazz == Character.class) {
|
||||
return charType;
|
||||
} else if (boxed.clazz == Integer.class) {
|
||||
return intType;
|
||||
} else if (boxed.clazz == Long.class) {
|
||||
return longType;
|
||||
} else if (boxed.clazz == Float.class) {
|
||||
return floatType;
|
||||
} else if (boxed.clazz == Double.class) {
|
||||
return doubleType;
|
||||
public static Class<?> getUnboxedype(Class<?> clazz) {
|
||||
if (clazz == Boolean.class) {
|
||||
return boolean.class;
|
||||
} else if (clazz == Byte.class) {
|
||||
return byte.class;
|
||||
} else if (clazz == Short.class) {
|
||||
return short.class;
|
||||
} else if (clazz == Character.class) {
|
||||
return char.class;
|
||||
} else if (clazz == Integer.class) {
|
||||
return int.class;
|
||||
} else if (clazz == Long.class) {
|
||||
return long.class;
|
||||
} else if (clazz == Float.class) {
|
||||
return float.class;
|
||||
} else if (clazz == Double.class) {
|
||||
return double.class;
|
||||
}
|
||||
|
||||
return boxed;
|
||||
return clazz;
|
||||
}
|
||||
|
||||
public static boolean isConstantType(Type constant) {
|
||||
return constant.clazz == boolean.class ||
|
||||
constant.clazz == byte.class ||
|
||||
constant.clazz == short.class ||
|
||||
constant.clazz == char.class ||
|
||||
constant.clazz == int.class ||
|
||||
constant.clazz == long.class ||
|
||||
constant.clazz == float.class ||
|
||||
constant.clazz == double.class ||
|
||||
constant.clazz == String.class;
|
||||
public static boolean isConstantType(Class<?> clazz) {
|
||||
return clazz == boolean.class ||
|
||||
clazz == byte.class ||
|
||||
clazz == short.class ||
|
||||
clazz == char.class ||
|
||||
clazz == int.class ||
|
||||
clazz == long.class ||
|
||||
clazz == float.class ||
|
||||
clazz == double.class ||
|
||||
clazz == String.class;
|
||||
}
|
||||
|
||||
public static Class<?> ObjectClassTodefClass(Class<?> clazz) {
|
||||
|
@ -569,7 +551,9 @@ public final class Definition {
|
|||
}
|
||||
|
||||
public static String ClassToName(Class<?> clazz) {
|
||||
if (clazz.isArray()) {
|
||||
if (clazz.isLocalClass() || clazz.isAnonymousClass()) {
|
||||
return null;
|
||||
} else if (clazz.isArray()) {
|
||||
Class<?> component = clazz.getComponentType();
|
||||
int dimensions = 1;
|
||||
|
||||
|
@ -579,7 +563,7 @@ public final class Definition {
|
|||
}
|
||||
|
||||
if (component == def.class) {
|
||||
StringBuilder builder = new StringBuilder("def");
|
||||
StringBuilder builder = new StringBuilder(def.class.getSimpleName());
|
||||
|
||||
for (int dimension = 0; dimension < dimensions; dimensions++) {
|
||||
builder.append("[]");
|
||||
|
@ -588,7 +572,7 @@ public final class Definition {
|
|||
return builder.toString();
|
||||
}
|
||||
} else if (clazz == def.class) {
|
||||
return "def";
|
||||
return def.class.getSimpleName();
|
||||
}
|
||||
|
||||
return clazz.getCanonicalName().replace('$', '.');
|
||||
|
@ -606,30 +590,30 @@ public final class Definition {
|
|||
++dimensions;
|
||||
}
|
||||
|
||||
if (clazz == def.class) {
|
||||
return getType(structsMap.get("def"), dimensions);
|
||||
if (component == def.class) {
|
||||
return getType(structsMap.get(def.class.getSimpleName()), dimensions);
|
||||
} else {
|
||||
return getType(runtimeMap.get(clazz).struct, dimensions);
|
||||
return getType(structsMap.get(ClassToName(component)), dimensions);
|
||||
}
|
||||
} else if (clazz == def.class) {
|
||||
return getType(structsMap.get("def"), 0);
|
||||
return getType(structsMap.get(def.class.getSimpleName()), 0);
|
||||
}
|
||||
|
||||
return getType(structsMap.get(ClassToName(clazz)), 0);
|
||||
}
|
||||
|
||||
public static Class<?> TypeToClass (Type type) {
|
||||
if (type.dynamic) {
|
||||
public Struct RuntimeClassToStruct(Class<?> clazz) {
|
||||
return structsMap.get(ClassToName(clazz));
|
||||
}
|
||||
|
||||
public static Class<?> TypeToClass(Type type) {
|
||||
if (def.class.getSimpleName().equals(type.struct.name)) {
|
||||
return ObjectClassTodefClass(type.clazz);
|
||||
}
|
||||
|
||||
return type.clazz;
|
||||
}
|
||||
|
||||
public RuntimeClass getRuntimeClass(Class<?> clazz) {
|
||||
return runtimeMap.get(clazz);
|
||||
}
|
||||
|
||||
public Class<?> getClassFromBinaryName(String name) {
|
||||
Struct struct = structsMap.get(name.replace('$', '.'));
|
||||
|
||||
|
@ -641,13 +625,13 @@ public final class Definition {
|
|||
return simpleTypesMap.values();
|
||||
}
|
||||
|
||||
private static String buildMethodCacheKey(String structName, String methodName, List<Type> arguments) {
|
||||
private static String buildMethodCacheKey(String structName, String methodName, List<Class<?>> arguments) {
|
||||
StringBuilder key = new StringBuilder();
|
||||
key.append(structName);
|
||||
key.append(methodName);
|
||||
|
||||
for (Type argument : arguments) {
|
||||
key.append(argument.name);
|
||||
for (Class<?> argument : arguments) {
|
||||
key.append(argument.getName());
|
||||
}
|
||||
|
||||
return key.toString();
|
||||
|
@ -659,20 +643,19 @@ public final class Definition {
|
|||
|
||||
// INTERNAL IMPLEMENTATION:
|
||||
|
||||
private final Map<Class<?>, RuntimeClass> runtimeMap;
|
||||
private final Map<String, Struct> structsMap;
|
||||
private final Map<String, Type> simpleTypesMap;
|
||||
|
||||
public Definition(List<Whitelist> whitelists) {
|
||||
structsMap = new HashMap<>();
|
||||
simpleTypesMap = new HashMap<>();
|
||||
runtimeMap = new HashMap<>();
|
||||
|
||||
Map<Class<?>, Struct> javaClassesToPainlessStructs = new HashMap<>();
|
||||
String origin = null;
|
||||
|
||||
// add the universal def type
|
||||
structsMap.put("def", new Struct("def", Object.class, org.objectweb.asm.Type.getType(Object.class)));
|
||||
structsMap.put(def.class.getSimpleName(),
|
||||
new Struct(def.class.getSimpleName(), Object.class, org.objectweb.asm.Type.getType(Object.class)));
|
||||
|
||||
try {
|
||||
// first iteration collects all the Painless type names that
|
||||
|
@ -777,7 +760,7 @@ public final class Definition {
|
|||
copyStruct(painlessStruct.name, painlessSuperStructs);
|
||||
|
||||
// copies methods and fields from Object into interface types
|
||||
if (painlessStruct.clazz.isInterface() || ("def").equals(painlessStruct.name)) {
|
||||
if (painlessStruct.clazz.isInterface() || (def.class.getSimpleName()).equals(painlessStruct.name)) {
|
||||
Struct painlessObjectStruct = javaClassesToPainlessStructs.get(Object.class);
|
||||
|
||||
if (painlessObjectStruct != null) {
|
||||
|
@ -786,17 +769,6 @@ public final class Definition {
|
|||
}
|
||||
}
|
||||
|
||||
// mark functional interfaces (or set null, to mark class is not)
|
||||
for (String painlessStructName : structsMap.keySet()) {
|
||||
Struct painlessStruct = structsMap.get(painlessStructName);
|
||||
|
||||
if (painlessStruct.name.equals(painlessStructName) == false) {
|
||||
continue;
|
||||
}
|
||||
|
||||
painlessStruct.functionalMethod.set(computeFunctionalInterfaceMethod(painlessStruct));
|
||||
}
|
||||
|
||||
// precompute runtime classes
|
||||
for (String painlessStructName : structsMap.keySet()) {
|
||||
Struct painlessStruct = structsMap.get(painlessStructName);
|
||||
|
@ -814,7 +786,7 @@ public final class Definition {
|
|||
continue;
|
||||
}
|
||||
|
||||
entry.setValue(entry.getValue().freeze());
|
||||
entry.setValue(entry.getValue().freeze(computeFunctionalInterfaceMethod(entry.getValue())));
|
||||
}
|
||||
|
||||
voidType = getType("void");
|
||||
|
@ -835,7 +807,7 @@ public final class Definition {
|
|||
charType = getType("char");
|
||||
CharacterType = getType("Character");
|
||||
ObjectType = getType("Object");
|
||||
DefType = getType("def");
|
||||
DefType = getType(def.class.getSimpleName());
|
||||
NumberType = getType("Number");
|
||||
StringType = getType("String");
|
||||
ExceptionType = getType("Exception");
|
||||
|
@ -915,17 +887,17 @@ public final class Definition {
|
|||
"parameters " + whitelistConstructor.painlessParameterTypeNames);
|
||||
}
|
||||
|
||||
List<Type> painlessParametersTypes = new ArrayList<>(whitelistConstructor.painlessParameterTypeNames.size());
|
||||
List<Class<?>> painlessParametersTypes = new ArrayList<>(whitelistConstructor.painlessParameterTypeNames.size());
|
||||
Class<?>[] javaClassParameters = new Class<?>[whitelistConstructor.painlessParameterTypeNames.size()];
|
||||
|
||||
for (int parameterCount = 0; parameterCount < whitelistConstructor.painlessParameterTypeNames.size(); ++parameterCount) {
|
||||
String painlessParameterTypeName = whitelistConstructor.painlessParameterTypeNames.get(parameterCount);
|
||||
|
||||
try {
|
||||
Type painlessParameterType = getTypeInternal(painlessParameterTypeName);
|
||||
Class<?> painlessParameterClass = TypeToClass(getTypeInternal(painlessParameterTypeName));
|
||||
|
||||
painlessParametersTypes.add(painlessParameterType);
|
||||
javaClassParameters[parameterCount] = painlessParameterType.clazz;
|
||||
painlessParametersTypes.add(painlessParameterClass);
|
||||
javaClassParameters[parameterCount] = defClassToObjectClass(painlessParameterClass);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new IllegalArgumentException("struct not defined for constructor parameter [" + painlessParameterTypeName + "] " +
|
||||
"with owner struct [" + ownerStructName + "] and constructor parameters " +
|
||||
|
@ -957,9 +929,8 @@ public final class Definition {
|
|||
}
|
||||
|
||||
painlessConstructor = methodCache.computeIfAbsent(buildMethodCacheKey(ownerStruct.name, "<init>", painlessParametersTypes),
|
||||
key -> new Method("<init>", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes,
|
||||
key -> new Method("<init>", ownerStruct, null, void.class, painlessParametersTypes,
|
||||
asmConstructor, javaConstructor.getModifiers(), javaHandle));
|
||||
|
||||
ownerStruct.constructors.put(painlessMethodKey, painlessConstructor);
|
||||
} else if (painlessConstructor.arguments.equals(painlessParametersTypes) == false){
|
||||
throw new IllegalArgumentException(
|
||||
|
@ -997,7 +968,7 @@ public final class Definition {
|
|||
|
||||
int augmentedOffset = javaAugmentedClass == null ? 0 : 1;
|
||||
|
||||
List<Type> painlessParametersTypes = new ArrayList<>(whitelistMethod.painlessParameterTypeNames.size());
|
||||
List<Class<?>> painlessParametersTypes = new ArrayList<>(whitelistMethod.painlessParameterTypeNames.size());
|
||||
Class<?>[] javaClassParameters = new Class<?>[whitelistMethod.painlessParameterTypeNames.size() + augmentedOffset];
|
||||
|
||||
if (javaAugmentedClass != null) {
|
||||
|
@ -1008,10 +979,10 @@ public final class Definition {
|
|||
String painlessParameterTypeName = whitelistMethod.painlessParameterTypeNames.get(parameterCount);
|
||||
|
||||
try {
|
||||
Type painlessParameterType = getTypeInternal(painlessParameterTypeName);
|
||||
Class<?> painlessParameterClass = TypeToClass(getTypeInternal(painlessParameterTypeName));
|
||||
|
||||
painlessParametersTypes.add(painlessParameterType);
|
||||
javaClassParameters[parameterCount + augmentedOffset] = painlessParameterType.clazz;
|
||||
painlessParametersTypes.add(painlessParameterClass);
|
||||
javaClassParameters[parameterCount + augmentedOffset] = defClassToObjectClass(painlessParameterClass);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new IllegalArgumentException("struct not defined for method parameter [" + painlessParameterTypeName + "] " +
|
||||
"with owner struct [" + ownerStructName + "] and method with name [" + whitelistMethod.javaMethodName + "] " +
|
||||
|
@ -1030,18 +1001,18 @@ public final class Definition {
|
|||
javaImplClass.getName() + "]", nsme);
|
||||
}
|
||||
|
||||
Type painlessReturnType;
|
||||
Class<?> painlessReturnClass;
|
||||
|
||||
try {
|
||||
painlessReturnType = getTypeInternal(whitelistMethod.painlessReturnTypeName);
|
||||
painlessReturnClass = TypeToClass(getTypeInternal(whitelistMethod.painlessReturnTypeName));
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new IllegalArgumentException("struct not defined for return type [" + whitelistMethod.painlessReturnTypeName + "] " +
|
||||
"with owner struct [" + ownerStructName + "] and method with name [" + whitelistMethod.javaMethodName + "] " +
|
||||
"and parameters " + whitelistMethod.painlessParameterTypeNames, iae);
|
||||
}
|
||||
|
||||
if (javaMethod.getReturnType().equals(painlessReturnType.clazz) == false) {
|
||||
throw new IllegalArgumentException("specified return type class [" + painlessReturnType.clazz + "] " +
|
||||
if (javaMethod.getReturnType() != defClassToObjectClass(painlessReturnClass)) {
|
||||
throw new IllegalArgumentException("specified return type class [" + painlessReturnClass + "] " +
|
||||
"does not match the return type class [" + javaMethod.getReturnType() + "] for the " +
|
||||
"method with name [" + whitelistMethod.javaMethodName + "] " +
|
||||
"and parameters " + whitelistMethod.painlessParameterTypeNames);
|
||||
|
@ -1065,14 +1036,14 @@ public final class Definition {
|
|||
|
||||
painlessMethod = methodCache.computeIfAbsent(
|
||||
buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes),
|
||||
key -> new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, painlessParametersTypes,
|
||||
key -> new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnClass, painlessParametersTypes,
|
||||
asmMethod, javaMethod.getModifiers(), javaMethodHandle));
|
||||
ownerStruct.staticMethods.put(painlessMethodKey, painlessMethod);
|
||||
} else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) &&
|
||||
} else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn == painlessReturnClass &&
|
||||
painlessMethod.arguments.equals(painlessParametersTypes)) == false) {
|
||||
throw new IllegalArgumentException("illegal duplicate static methods [" + painlessMethodKey + "] " +
|
||||
"found within the struct [" + ownerStruct.name + "] with name [" + whitelistMethod.javaMethodName + "], " +
|
||||
"return types [" + painlessReturnType + "] and [" + painlessMethod.rtn.name + "], " +
|
||||
"return types [" + painlessReturnClass + "] and [" + painlessMethod.rtn + "], " +
|
||||
"and parameters " + painlessParametersTypes + " and " + painlessMethod.arguments);
|
||||
}
|
||||
} else {
|
||||
|
@ -1091,14 +1062,14 @@ public final class Definition {
|
|||
|
||||
painlessMethod = methodCache.computeIfAbsent(
|
||||
buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes),
|
||||
key -> new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType,
|
||||
key -> new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnClass,
|
||||
painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle));
|
||||
ownerStruct.methods.put(painlessMethodKey, painlessMethod);
|
||||
} else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) &&
|
||||
} else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnClass) &&
|
||||
painlessMethod.arguments.equals(painlessParametersTypes)) == false) {
|
||||
throw new IllegalArgumentException("illegal duplicate member methods [" + painlessMethodKey + "] " +
|
||||
"found within the struct [" + ownerStruct.name + "] with name [" + whitelistMethod.javaMethodName + "], " +
|
||||
"return types [" + painlessReturnType + "] and [" + painlessMethod.rtn.name + "], " +
|
||||
"return types [" + painlessReturnClass + "] and [" + painlessMethod.rtn + "], " +
|
||||
"and parameters " + painlessParametersTypes + " and " + painlessMethod.arguments);
|
||||
}
|
||||
}
|
||||
|
@ -1126,10 +1097,10 @@ public final class Definition {
|
|||
"not found for class [" + ownerStruct.clazz.getName() + "].");
|
||||
}
|
||||
|
||||
Type painlessFieldType;
|
||||
Class<?> painlessFieldClass;
|
||||
|
||||
try {
|
||||
painlessFieldType = getTypeInternal(whitelistField.painlessFieldTypeName);
|
||||
painlessFieldClass = TypeToClass(getTypeInternal(whitelistField.painlessFieldTypeName));
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new IllegalArgumentException("struct not defined for return type [" + whitelistField.painlessFieldTypeName + "] " +
|
||||
"with owner struct [" + ownerStructName + "] and field with name [" + whitelistField.javaFieldName + "]", iae);
|
||||
|
@ -1145,11 +1116,11 @@ public final class Definition {
|
|||
|
||||
if (painlessField == null) {
|
||||
painlessField = fieldCache.computeIfAbsent(
|
||||
buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name),
|
||||
buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldClass.getName()),
|
||||
key -> new Field(whitelistField.javaFieldName, javaField.getName(),
|
||||
ownerStruct, painlessFieldType, javaField.getModifiers(), null, null));
|
||||
ownerStruct, painlessFieldClass, javaField.getModifiers(), null, null));
|
||||
ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField);
|
||||
} else if (painlessField.type.equals(painlessFieldType) == false) {
|
||||
} else if (painlessField.clazz != painlessFieldClass) {
|
||||
throw new IllegalArgumentException("illegal duplicate static fields [" + whitelistField.javaFieldName + "] " +
|
||||
"found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]");
|
||||
}
|
||||
|
@ -1174,11 +1145,11 @@ public final class Definition {
|
|||
|
||||
if (painlessField == null) {
|
||||
painlessField = fieldCache.computeIfAbsent(
|
||||
buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name),
|
||||
buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldClass.getName()),
|
||||
key -> new Field(whitelistField.javaFieldName, javaField.getName(),
|
||||
ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter));
|
||||
ownerStruct, painlessFieldClass, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter));
|
||||
ownerStruct.members.put(whitelistField.javaFieldName, painlessField);
|
||||
} else if (painlessField.type.equals(painlessFieldType) == false) {
|
||||
} else if (painlessField.clazz != painlessFieldClass) {
|
||||
throw new IllegalArgumentException("illegal duplicate member fields [" + whitelistField.javaFieldName + "] " +
|
||||
"found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]");
|
||||
}
|
||||
|
@ -1262,7 +1233,7 @@ public final class Definition {
|
|||
for (Field field : child.members.values()) {
|
||||
if (owner.members.get(field.name) == null) {
|
||||
owner.members.put(field.name,
|
||||
new Field(field.name, field.javaName, owner, field.type, field.modifiers, field.getter, field.setter));
|
||||
new Field(field.name, field.javaName, owner, field.clazz, field.modifiers, field.getter, field.setter));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1272,51 +1243,45 @@ public final class Definition {
|
|||
* Precomputes a more efficient structure for dynamic method/field access.
|
||||
*/
|
||||
private void addRuntimeClass(final Struct struct) {
|
||||
final Map<MethodKey, Method> methods = struct.methods;
|
||||
final Map<String, MethodHandle> getters = new HashMap<>();
|
||||
final Map<String, MethodHandle> setters = new HashMap<>();
|
||||
|
||||
// add all members
|
||||
for (final Map.Entry<String, Field> member : struct.members.entrySet()) {
|
||||
getters.put(member.getKey(), member.getValue().getter);
|
||||
setters.put(member.getKey(), member.getValue().setter);
|
||||
}
|
||||
|
||||
// add all getters/setters
|
||||
for (final Map.Entry<MethodKey, Method> method : methods.entrySet()) {
|
||||
final String name = method.getKey().name;
|
||||
final Method m = method.getValue();
|
||||
for (Map.Entry<MethodKey, Method> method : struct.methods.entrySet()) {
|
||||
String name = method.getKey().name;
|
||||
Method m = method.getValue();
|
||||
|
||||
if (m.arguments.size() == 0 &&
|
||||
name.startsWith("get") &&
|
||||
name.length() > 3 &&
|
||||
Character.isUpperCase(name.charAt(3))) {
|
||||
final StringBuilder newName = new StringBuilder();
|
||||
StringBuilder newName = new StringBuilder();
|
||||
newName.append(Character.toLowerCase(name.charAt(3)));
|
||||
newName.append(name.substring(4));
|
||||
getters.putIfAbsent(newName.toString(), m.handle);
|
||||
struct.getters.putIfAbsent(newName.toString(), m.handle);
|
||||
} else if (m.arguments.size() == 0 &&
|
||||
name.startsWith("is") &&
|
||||
name.length() > 2 &&
|
||||
Character.isUpperCase(name.charAt(2))) {
|
||||
final StringBuilder newName = new StringBuilder();
|
||||
StringBuilder newName = new StringBuilder();
|
||||
newName.append(Character.toLowerCase(name.charAt(2)));
|
||||
newName.append(name.substring(3));
|
||||
getters.putIfAbsent(newName.toString(), m.handle);
|
||||
struct.getters.putIfAbsent(newName.toString(), m.handle);
|
||||
}
|
||||
|
||||
if (m.arguments.size() == 1 &&
|
||||
name.startsWith("set") &&
|
||||
name.length() > 3 &&
|
||||
Character.isUpperCase(name.charAt(3))) {
|
||||
final StringBuilder newName = new StringBuilder();
|
||||
StringBuilder newName = new StringBuilder();
|
||||
newName.append(Character.toLowerCase(name.charAt(3)));
|
||||
newName.append(name.substring(4));
|
||||
setters.putIfAbsent(newName.toString(), m.handle);
|
||||
struct.setters.putIfAbsent(newName.toString(), m.handle);
|
||||
}
|
||||
}
|
||||
|
||||
runtimeMap.put(struct.clazz, new RuntimeClass(struct, methods, getters, setters));
|
||||
// add all members
|
||||
for (Map.Entry<String, Field> member : struct.members.entrySet()) {
|
||||
struct.getters.put(member.getKey(), member.getValue().getter);
|
||||
struct.setters.put(member.getKey(), member.getValue().setter);
|
||||
}
|
||||
}
|
||||
|
||||
/** computes the functional interface method for a class, or returns null */
|
||||
|
@ -1409,7 +1374,7 @@ public final class Definition {
|
|||
}
|
||||
}
|
||||
|
||||
return new Type(name, dimensions, "def".equals(name), struct, clazz, type);
|
||||
return new Type(name, dimensions, def.class.getSimpleName().equals(name), struct, clazz, type);
|
||||
}
|
||||
|
||||
private int getDimensions(String name) {
|
||||
|
|
|
@ -20,8 +20,7 @@
|
|||
package org.elasticsearch.painless;
|
||||
|
||||
import org.elasticsearch.painless.Definition.Method;
|
||||
import org.elasticsearch.painless.Definition.Type;
|
||||
import org.elasticsearch.painless.api.Augmentation;
|
||||
import org.objectweb.asm.Type;
|
||||
|
||||
import java.lang.invoke.MethodType;
|
||||
import java.lang.reflect.Modifier;
|
||||
|
@ -63,9 +62,9 @@ public class FunctionRef {
|
|||
/** factory method type descriptor */
|
||||
public final String factoryDescriptor;
|
||||
/** functional interface method as type */
|
||||
public final org.objectweb.asm.Type interfaceType;
|
||||
public final Type interfaceType;
|
||||
/** delegate method type method as type */
|
||||
public final org.objectweb.asm.Type delegateType;
|
||||
public final Type delegateType;
|
||||
|
||||
/**
|
||||
* Creates a new FunctionRef, which will resolve {@code type::call} from the whitelist.
|
||||
|
@ -75,8 +74,9 @@ public class FunctionRef {
|
|||
* @param call the right hand side of a method reference expression
|
||||
* @param numCaptures number of captured arguments
|
||||
*/
|
||||
public FunctionRef(Definition definition, Type expected, String type, String call, int numCaptures) {
|
||||
this(expected, expected.struct.getFunctionalMethod(), lookup(definition, expected, type, call, numCaptures > 0), numCaptures);
|
||||
public FunctionRef(Definition definition, Class<?> expected, String type, String call, int numCaptures) {
|
||||
this(expected, definition.ClassToType(expected).struct.functionalMethod,
|
||||
lookup(definition, expected, type, call, numCaptures > 0), numCaptures);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -86,11 +86,11 @@ public class FunctionRef {
|
|||
* @param delegateMethod implementation method
|
||||
* @param numCaptures number of captured arguments
|
||||
*/
|
||||
public FunctionRef(Type expected, Method interfaceMethod, Method delegateMethod, int numCaptures) {
|
||||
public FunctionRef(Class<?> expected, Method interfaceMethod, Method delegateMethod, int numCaptures) {
|
||||
MethodType delegateMethodType = delegateMethod.getMethodType();
|
||||
|
||||
interfaceMethodName = interfaceMethod.name;
|
||||
factoryMethodType = MethodType.methodType(expected.clazz,
|
||||
factoryMethodType = MethodType.methodType(expected,
|
||||
delegateMethodType.dropParameterTypes(numCaptures, delegateMethodType.parameterCount()));
|
||||
interfaceMethodType = interfaceMethod.getMethodType().dropParameterTypes(0, 1);
|
||||
|
||||
|
@ -120,17 +120,18 @@ public class FunctionRef {
|
|||
this.delegateMethod = delegateMethod;
|
||||
|
||||
factoryDescriptor = factoryMethodType.toMethodDescriptorString();
|
||||
interfaceType = org.objectweb.asm.Type.getMethodType(interfaceMethodType.toMethodDescriptorString());
|
||||
delegateType = org.objectweb.asm.Type.getMethodType(this.delegateMethodType.toMethodDescriptorString());
|
||||
interfaceType = Type.getMethodType(interfaceMethodType.toMethodDescriptorString());
|
||||
delegateType = Type.getMethodType(this.delegateMethodType.toMethodDescriptorString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new FunctionRef (low level).
|
||||
* It is for runtime use only.
|
||||
*/
|
||||
public FunctionRef(Type expected, Method interfaceMethod, String delegateMethodName, MethodType delegateMethodType, int numCaptures) {
|
||||
public FunctionRef(Class<?> expected,
|
||||
Method interfaceMethod, String delegateMethodName, MethodType delegateMethodType, int numCaptures) {
|
||||
interfaceMethodName = interfaceMethod.name;
|
||||
factoryMethodType = MethodType.methodType(expected.clazz,
|
||||
factoryMethodType = MethodType.methodType(expected,
|
||||
delegateMethodType.dropParameterTypes(numCaptures, delegateMethodType.parameterCount()));
|
||||
interfaceMethodType = interfaceMethod.getMethodType().dropParameterTypes(0, 1);
|
||||
|
||||
|
@ -150,14 +151,14 @@ public class FunctionRef {
|
|||
/**
|
||||
* Looks up {@code type::call} from the whitelist, and returns a matching method.
|
||||
*/
|
||||
private static Definition.Method lookup(Definition definition, Definition.Type expected,
|
||||
private static Definition.Method lookup(Definition definition, Class<?> expected,
|
||||
String type, String call, boolean receiverCaptured) {
|
||||
// check its really a functional interface
|
||||
// for e.g. Comparable
|
||||
Method method = expected.struct.getFunctionalMethod();
|
||||
Method method = definition.ClassToType(expected).struct.functionalMethod;
|
||||
if (method == null) {
|
||||
throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
|
||||
"to [" + expected.name + "], not a functional interface");
|
||||
"to [" + Definition.ClassToName(expected) + "], not a functional interface");
|
||||
}
|
||||
|
||||
// lookup requested method
|
||||
|
|
|
@ -93,7 +93,7 @@ import static org.objectweb.asm.Opcodes.H_NEWINVOKESPECIAL;
|
|||
* private $$Lambda0(List arg$0) {
|
||||
* this.arg$0 = arg$0;
|
||||
* }
|
||||
*
|
||||
*
|
||||
* public static Consumer create$lambda(List arg$0) {
|
||||
* return new $$Lambda0(arg$0);
|
||||
* }
|
||||
|
@ -212,7 +212,7 @@ public final class LambdaBootstrap {
|
|||
ClassWriter cw = beginLambdaClass(lambdaClassName, factoryMethodType.returnType());
|
||||
Capture[] captures = generateCaptureFields(cw, factoryMethodType);
|
||||
generateLambdaConstructor(cw, lambdaClassType, factoryMethodType, captures);
|
||||
|
||||
|
||||
// Handles the special case where a method reference refers to a ctor (we need a static wrapper method):
|
||||
if (delegateInvokeType == H_NEWINVOKESPECIAL) {
|
||||
assert CTOR_METHOD_NAME.equals(delegateMethodName);
|
||||
|
@ -226,7 +226,7 @@ public final class LambdaBootstrap {
|
|||
generateInterfaceMethod(cw, factoryMethodType, lambdaClassType, interfaceMethodName,
|
||||
interfaceMethodType, delegateClassType, delegateInvokeType,
|
||||
delegateMethodName, delegateMethodType, captures);
|
||||
|
||||
|
||||
endLambdaClass(cw);
|
||||
|
||||
Class<?> lambdaClass = createLambdaClass(loader, cw, lambdaClassType);
|
||||
|
@ -321,7 +321,7 @@ public final class LambdaBootstrap {
|
|||
|
||||
constructor.returnValue();
|
||||
constructor.endMethod();
|
||||
|
||||
|
||||
// Add a factory method, if lambda takes captures.
|
||||
// @uschindler says: I talked with Rémi Forax about this. Technically, a plain ctor
|
||||
// and a MethodHandle to the ctor would be enough - BUT: Hotspot is unable to
|
||||
|
@ -337,10 +337,10 @@ public final class LambdaBootstrap {
|
|||
/**
|
||||
* Generates a factory method to delegate to constructors.
|
||||
*/
|
||||
private static void generateStaticCtorDelegator(ClassWriter cw, int access, String delegatorMethodName,
|
||||
private static void generateStaticCtorDelegator(ClassWriter cw, int access, String delegatorMethodName,
|
||||
Type delegateClassType, MethodType delegateMethodType) {
|
||||
Method wrapperMethod = new Method(delegatorMethodName, delegateMethodType.toMethodDescriptorString());
|
||||
Method constructorMethod =
|
||||
Method constructorMethod =
|
||||
new Method(CTOR_METHOD_NAME, delegateMethodType.changeReturnType(void.class).toMethodDescriptorString());
|
||||
int modifiers = access | ACC_STATIC;
|
||||
|
||||
|
@ -379,7 +379,7 @@ public final class LambdaBootstrap {
|
|||
GeneratorAdapter iface = new GeneratorAdapter(modifiers, lamMeth,
|
||||
cw.visitMethod(modifiers, interfaceMethodName, lamDesc, null, null));
|
||||
iface.visitCode();
|
||||
|
||||
|
||||
// Loads any captured variables onto the stack.
|
||||
for (int captureCount = 0; captureCount < captures.length; ++captureCount) {
|
||||
iface.loadThis();
|
||||
|
@ -473,7 +473,7 @@ public final class LambdaBootstrap {
|
|||
private static CallSite createNoCaptureCallSite(
|
||||
MethodType factoryMethodType,
|
||||
Class<?> lambdaClass) {
|
||||
|
||||
|
||||
try {
|
||||
return new ConstantCallSite(MethodHandles.constant(
|
||||
factoryMethodType.returnType(), lambdaClass.getConstructor().newInstance()));
|
||||
|
@ -503,7 +503,7 @@ public final class LambdaBootstrap {
|
|||
* delegate method will use converted types from the interface method. Using
|
||||
* invokedynamic to make the delegate method call allows
|
||||
* {@link MethodHandle#asType} to be used to do the type conversion instead
|
||||
* of either a lot more code or requiring many {@link Definition.Type}s to be looked
|
||||
* of either a lot more code or requiring many {@link Class}es to be looked
|
||||
* up at link-time.
|
||||
*/
|
||||
public static CallSite delegateBootstrap(Lookup lookup,
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue