Merge branch 'master' into pr/s3-path-style-access
# Conflicts:
#	plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java
#	plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java
#	plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
#	plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java

commit d7eb375d24

@@ -82,4 +82,7 @@
(c-set-offset 'func-decl-cont '++)
))
(c-basic-offset . 4)
(c-comment-only-line-offset . (0 . 0)))))
(c-comment-only-line-offset . (0 . 0))
(fill-column . 140)
(fci-rule-column . 140)
(compile-command . "gradle compileTestJava"))))

@@ -0,0 +1,34 @@
<!--
GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
-->

<!--
If you are filing a bug report, please remove the below feature
request block and provide responses for all of the below items.
-->

**Elasticsearch version**:

**JVM version**:

**OS version**:

**Description of the problem including expected versus actual behavior**:

**Steps to reproduce**:
 1.
 2.
 3.

**Provide logs (if relevant)**:

<!--
If you are filing a feature request, please remove the above bug
report block and provide responses for all of the below items.
-->

**Describe the feature**:

@@ -0,0 +1,13 @@
<!--
Thank you for your interest in and contributing to Elasticsearch! There
are a few simple things to check before submitting your pull request
that can help with the review process. You should delete these items
from your submission, but they are here to help bring them to your
attention.
-->

- Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/.github/CONTRIBUTING.md)?
- If submitting code, have you built your branch locally prior to submission with `gradle check`?
- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
- If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?

@@ -1,37 +1,42 @@

# intellij files
.idea/
.gradle/
*.iml
*.ipr
*.iws
work/
/data/
logs/
.DS_Store
build/
generated-resources/
**/.local*
docs/html/
docs/build.log
/tmp/
backwards/
html_docs
.vagrant/
build-idea/

## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## All files (.project, .classpath, .settings/*) should be generated through Maven which
## will correctly set the classpath based on the declared dependencies and write settings
## files to ensure common coding style across Eclipse and IDEA.
# eclipse files
.project
.classpath
eclipse-build
.settings
build-eclipse/

## netbeans ignores
# netbeans files
nb-configuration.xml
nbactions.xml

dependency-reduced-pom.xml
# gradle stuff
.gradle/
build/

# old patterns specific to maven
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
dependency-reduced-pom.xml

# testing stuff
**/.local*
.vagrant/

# osx stuff
.DS_Store

# needed in case docs build is run...maybe we can configure doc build to generate files under build?
html_docs

# random old stuff that we should look at the necessity of...
/tmp/
backwards/
eclipse-build

@@ -16,7 +16,6 @@
-/plugins/discovery-azure/target
-/plugins/discovery-ec2/target
-/plugins/discovery-gce/target
-/plugins/discovery-multicast/target
-/plugins/jvm-example/target
-/plugins/lang-expression/target
-/plugins/lang-groovy/target

@@ -84,7 +84,9 @@ Please follow these formatting guidelines:
* Line width is 140 characters
* The rest is left to Java coding standards
* Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
* Don't worry too much about imports. Try not to change the order but don't worry about fighting your IDE to stop it from switching from * imports to specific imports or from specific to * imports.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
* Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.

To create a distribution from the source, simply run:

@@ -93,12 +95,10 @@ cd elasticsearch/
gradle assemble
```

You will find the newly built packages under: `./distribution/build/distributions/`.
You will find the newly built packages under: `./distribution/(deb|rpm|tar|zip)/build/distributions/`.

Before submitting your changes, run the test suite to make sure that nothing is broken, with:

```sh
gradle check
```

Source: [Contributing to elasticsearch](https://www.elastic.co/contributing-to-elasticsearch/)
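
While iterating you will usually not want the full `gradle check`; a minimal sketch of scoping the run to one project (the module path and test class are illustrative assumptions, not from this commit):

```sh
# run only one module's unit tests
gradle :core:test
# narrow to a single suite via the randomized-testing filter shown in TESTING.asciidoc
gradle :core:test -Dtests.class=org.elasticsearch.common.unit.TimeValueTests
```
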
@@ -1,5 +1,5 @@
Elasticsearch
Copyright 2009-2015 Elasticsearch
Copyright 2009-2016 Elasticsearch

This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).

@@ -9,7 +9,7 @@ Elasticsearch is a distributed RESTful search engine built for the cloud. Featur
* Distributed and Highly Available Search Engine.
** Each index is fully sharded with a configurable number of shards.
** Each shard can have one or more replicas.
** Read / Search operations performed on either one of the replica shard.
** Read / Search operations performed on any of the replica shards.
* Multi Tenant with Multi Types.
** Support for more than one index.
** Support for more than one type per index.

@@ -147,7 +147,7 @@ curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
}'
</pre>

The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get their own special index.

Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):

@@ -222,7 +222,7 @@ h1. License
<pre>
This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.

Copyright 2009-2015 Elasticsearch <https://www.elastic.co>
Copyright 2009-2016 Elasticsearch <https://www.elastic.co>

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of

@@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command.
REST tests use the following command:

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------

@@ -345,20 +345,21 @@ gradle :qa:vagrant:checkVagrantVersion
-------------------------------------

. Download and smoke test the VMs with `gradle vagrantSmokeTest` or
`gradle vagrantSmokeTestAllDistros`. The first time you run this it will
`gradle -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will
download the base images and provision the boxes and immediately quit. If you
run this again it'll skip the download step.

. Run the tests with `gradle checkPackages`. This will cause gradle to build
. Run the tests with `gradle packagingTest`. This will cause gradle to build
the tar, zip, and deb packages and all the plugins. It will then run the tests
on ubuntu-1404 and centos-7. We chose those two distributions as the default
because they cover deb and rpm packaging and SysVinit and systemd.

You can run on all the VMs by running `gradle checkPackagesAllDistros`. You can
run a particular VM with a command like `gradle checkOel7`. See `gradle tasks`
for a list. Its important to know that if you ctrl-c any of these `gradle`
commands then the boxes will remain running and you'll have to terminate them
with `vagrant halt`.
You can run on all the VMs by running `gradle -Pvagrant.boxes=all packagingTest`.
You can run a particular VM with a command like
`gradle -Pvagrant.boxes=oel-7 packagingTest`. See `gradle tasks` for a complete
list of available vagrant boxes for testing. It's important to know that if you
ctrl-c any of these `gradle` commands then the boxes will remain running and
you'll have to terminate them with 'gradle stop'.

All the regular vagrant commands should just work so you can get a shell in a
VM running trusty by running

@@ -387,7 +388,7 @@ We're missing the following because our tests are very linux/bash centric:

* Windows Server 2012

Its important to think of VMs like cattle. If they become lame you just shoot
It's important to think of VMs like cattle. If they become lame you just shoot
them and let vagrant reprovision them. Say you've hosed your precise VM:

----------------------------------------------------

@@ -432,7 +433,7 @@ and in another window:

----------------------------------------------------
vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
cd $RPM
cd $TESTROOT
sudo bats $BATS/*rpm*.bats
----------------------------------------------------

@@ -440,7 +441,7 @@ If you wanted to retest all the release artifacts on a single VM you could:

-------------------------------------------------
gradle prepareTestRoot
vagrant up trusty --provider virtualbox && vagrant ssh trusty
vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
cd $TESTROOT
sudo bats $BATS/*.bats
-------------------------------------------------

@@ -23,15 +23,15 @@

Vagrant.configure(2) do |config|
  config.vm.define "ubuntu-1204" do |config|
    config.vm.box = "ubuntu/precise64"
    config.vm.box = "elastic/ubuntu-12.04-x86_64"
    ubuntu_common config
  end
  config.vm.define "ubuntu-1404" do |config|
    config.vm.box = "ubuntu/trusty64"
    config.vm.box = "elastic/ubuntu-14.04-x86_64"
    ubuntu_common config
  end
  config.vm.define "ubuntu-1504" do |config|
    config.vm.box = "ubuntu/vivid64"
    config.vm.box = "elastic/ubuntu-15.04-x86_64"
    ubuntu_common config, extra: <<-SHELL
      # Install Jayatana so we can work around it being present.
      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana

@@ -41,44 +41,35 @@ Vagrant.configure(2) do |config|
  # get the sun jdk on there just aren't worth it. We have jessie for testing
  # debian and it works fine.
  config.vm.define "debian-8" do |config|
    config.vm.box = "debian/jessie64"
    deb_common config,
        'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
    config.vm.box = "elastic/debian-8-x86_64"
    deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
  end
  config.vm.define "centos-6" do |config|
    config.vm.box = "boxcutter/centos67"
    config.vm.box = "elastic/centos-6-x86_64"
    rpm_common config
  end
  config.vm.define "centos-7" do |config|
    # There is a centos/7 box but it doesn't have rsync or virtualbox guest
    # stuff on there so its slow to use. So chef it is....
    config.vm.box = "boxcutter/centos71"
    config.vm.box = "elastic/centos-7-x86_64"
    rpm_common config
  end
  config.vm.define "oel-6" do |config|
    config.vm.box = "elastic/oraclelinux-6-x86_64"
    rpm_common config
  end
  # This box hangs _forever_ on ```yum check-update```. I have no idea why.
  # config.vm.define "oel-6", autostart: false do |config|
  #   config.vm.box = "boxcutter/oel66"
  #   rpm_common(config)
  # end
  config.vm.define "oel-7" do |config|
    config.vm.box = "boxcutter/oel70"
    config.vm.box = "elastic/oraclelinux-7-x86_64"
    rpm_common config
  end
  config.vm.define "fedora-22" do |config|
    # Fedora hosts their own 'cloud' images that aren't in Vagrant's Atlas but
    # and are missing required stuff like rsync. It'd be nice if we could use
    # them but they much slower to get up and running then the boxcutter image.
    config.vm.box = "boxcutter/fedora22"
    config.vm.box = "elastic/fedora-22-x86_64"
    dnf_common config
  end
  config.vm.define "opensuse-13" do |config|
    config.vm.box = "chef/opensuse-13"
    config.vm.box_url = "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_opensuse-13.2-x86_64_chef-provisionerless.box"
    config.vm.box = "elastic/opensuse-13-x86_64"
    opensuse_common config
  end
  # The SLES boxes are not considered to be highest quality, but seem to be sufficient for a test run
  config.vm.define "sles-12" do |config|
    config.vm.box = "idar/sles12"
    config.vm.box = "elastic/sles-12-x86_64"
    sles_common config
  end
  # Switch the default share for the project root from /vagrant to

build.gradle

@@ -18,7 +18,10 @@
 */

import com.bmuschko.gradle.nexus.NexusPlugin
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.lib.RepositoryBuilder
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import org.apache.tools.ant.taskdefs.condition.Os

// common maven publishing configuration
subprojects {

@@ -50,21 +53,37 @@ subprojects {
    javadoc = true
    tests = false
  }
  nexus {
    String buildSnapshot = System.getProperty('build.snapshot', 'true')
    if (buildSnapshot == 'false') {
      Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build()
      String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
      repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
    }
  }
  // we have our own username/password prompts so that they only happen once
  // TODO: add gpg signing prompts
  // TODO: add gpg signing prompts, which is tricky, as the buildDeb/buildRpm tasks are executed before this code block
  project.gradle.taskGraph.whenReady { taskGraph ->
    if (taskGraph.allTasks.any { it.name == 'uploadArchives' }) {
      Console console = System.console()
      if (project.hasProperty('nexusUsername') == false) {
        String nexusUsername = console.readLine('\nNexus username: ')
      // no need for username/password on local deploy
      if (project.nexus.repositoryUrl.startsWith('file://')) {
        project.rootProject.allprojects.each {
          it.ext.nexusUsername = nexusUsername
          it.ext.nexusUsername = 'foo'
          it.ext.nexusPassword = 'bar'
        }
      }
      if (project.hasProperty('nexusPassword') == false) {
        String nexusPassword = new String(console.readPassword('\nNexus password: '))
        project.rootProject.allprojects.each {
          it.ext.nexusPassword = nexusPassword
      } else {
        if (project.hasProperty('nexusUsername') == false) {
          String nexusUsername = console.readLine('\nNexus username: ')
          project.rootProject.allprojects.each {
            it.ext.nexusUsername = nexusUsername
          }
        }
        if (project.hasProperty('nexusPassword') == false) {
          String nexusPassword = new String(console.readPassword('\nNexus password: '))
          project.rootProject.allprojects.each {
            it.ext.nexusPassword = nexusPassword
          }
        }
      }
    }
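
Taken together, the snapshot and local-deploy logic above implies a release-style deploy can be driven from the command line; a sketch of the invocation (the target directory is an example — `build.snapshot` is read via `System.getProperty`, `build.repository` via a project property):

```sh
gradle uploadArchives -Dbuild.snapshot=false -Pbuild.repository=file:///tmp/elasticsearch-releases
```
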
@@ -75,8 +94,9 @@ subprojects {
allprojects {
  // injecting groovy property variables into all projects
  project.ext {
    // for eclipse hacks...
    // for ide hacks...
    isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
    isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
  }
}

@@ -97,6 +117,7 @@ subprojects {
      // the "value" -quiet is added, separated by a space. This is ok since the javadoc
      // command already adds -quiet, so we are just duplicating it
      // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
      javadoc.options.encoding='UTF8'
      javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
    }
  }

@@ -108,12 +129,13 @@ subprojects {
  ext.projectSubstitutions = [
    "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
    "org.elasticsearch:elasticsearch:${version}": ':core',
    "org.elasticsearch:test-framework:${version}": ':test-framework',
    "org.elasticsearch.test:framework:${version}": ':test:framework',
    "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
    "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
    "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
    "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
    "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
    "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
  ]
  configurations.all {
    resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->

@@ -130,8 +152,8 @@ subprojects {
  // the dependency is added.
  gradle.projectsEvaluated {
    allprojects {
      if (project.path == ':test-framework') {
        // :test-framework:test cannot run before and after :core:test
      if (project.path == ':test:framework') {
        // :test:framework:test cannot run before and after :core:test
        return
      }
      configurations.all {

@@ -168,6 +190,36 @@ gradle.projectsEvaluated {
// intellij configuration
allprojects {
  apply plugin: 'idea'

  if (isIdea) {
    project.buildDir = file('build-idea')
  }
  idea {
    module {
      inheritOutputDirs = false
      outputDir = file('build-idea/classes/main')
      testOutputDir = file('build-idea/classes/test')

      // also ignore other possible build dirs
      excludeDirs += file('build')
      excludeDirs += file('build-eclipse')

      iml {
        // fix so that Gradle idea plugin properly generates support for resource folders
        // see also https://issues.gradle.org/browse/GRADLE-2975
        withXml {
          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each {
            it.attributes().remove('isTestSource')
            it.attributes().put('type', 'java-resource')
          }
          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each {
            it.attributes().remove('isTestSource')
            it.attributes().put('type', 'java-test-resource')
          }
        }
      }
    }
  }
}

idea {

@@ -195,16 +247,28 @@ tasks.idea.dependsOn(buildSrcIdea)
// eclipse configuration
allprojects {
  apply plugin: 'eclipse'
  // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse.
  if (path != ':') {
    eclipse.project.name = path
    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
      eclipse.project.name = eclipse.project.name.replace(':', '_')
    }
  }

  plugins.withType(JavaBasePlugin) {
    eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse')
    File eclipseBuild = project.file('build-eclipse')
    eclipse.classpath.defaultOutputDir = eclipseBuild
    if (isEclipse) {
      // set this so generated dirs will be relative to eclipse build
      project.buildDir = eclipseBuild
    }
    eclipse.classpath.file.whenMerged { classpath ->
      // give each source folder a unique corresponding output folder
      int i = 0;
      classpath.entries.findAll { it instanceof SourceFolder }.each { folder ->
        i++;
        // this is *NOT* a path or a file.
        folder.output = "build/eclipse/" + i
        folder.output = "build-eclipse/" + i
      }
    }
  }

|
@ -1,3 +1,5 @@
|
|||
import java.nio.file.Files
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
|
@ -40,6 +42,15 @@ archivesBaseName = 'build-tools'
|
|||
Properties props = new Properties()
|
||||
props.load(project.file('version.properties').newDataInputStream())
|
||||
version = props.getProperty('elasticsearch')
|
||||
boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true"));
|
||||
if (snapshot) {
|
||||
// we update the version property to reflect if we are building a snapshot or a release build
|
||||
// we write this back out below to load it in the Build.java which will be shown in rest main action
|
||||
// to indicate this being a snapshot build or a release build.
|
||||
version += "-SNAPSHOT"
|
||||
props.put("elasticsearch", version);
|
||||
}
|
||||
|
||||
|
||||
repositories {
|
||||
mavenCentral()
|
||||
|
@@ -63,11 +74,26 @@ dependencies {
  compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
  compile 'de.thetaphi:forbiddenapis:2.0'
  compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
  compile 'org.apache.rat:apache-rat:0.11'
}

File tempPropertiesFile = new File(project.buildDir, "version.properties")
task writeVersionProperties {
  inputs.properties(props)
  outputs.file(tempPropertiesFile)
  doLast {
    OutputStream stream = Files.newOutputStream(tempPropertiesFile.toPath());
    try {
      props.store(stream, "UTF-8");
    } finally {
      stream.close();
    }
  }
}

processResources {
  inputs.file('version.properties')
  from 'version.properties'
  dependsOn writeVersionProperties
  from tempPropertiesFile
}

extraArchive {

@@ -75,9 +101,17 @@ extraArchive {
  tests = false
}

idea {
  module {
    inheritOutputDirs = false
    outputDir = file('build-idea/classes/main')
    testOutputDir = file('build-idea/classes/test')
  }
}

eclipse {
  classpath {
    defaultOutputDir = new File(file('build'), 'eclipse')
    defaultOutputDir = file('build-eclipse')
  }
}

@@ -2,7 +2,6 @@ package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.ListenersList
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.esotericsoftware.kryo.serializers.FieldSerializer
import groovy.xml.NamespaceBuilder
import groovy.xml.NamespaceBuilderSupport
import org.apache.tools.ant.BuildException

@@ -14,7 +13,10 @@ import org.gradle.api.file.FileCollection
import org.gradle.api.file.FileTreeElement
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.specs.Spec
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.Optional
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.util.PatternFilterable
import org.gradle.api.tasks.util.PatternSet
import org.gradle.logging.ProgressLoggerFactory

@@ -78,7 +80,7 @@ class RandomizedTestingTask extends DefaultTask {
  @Input
  String argLine = null

  Map<String, String> systemProperties = new HashMap<>()
  Map<String, Object> systemProperties = new HashMap<>()
  PatternFilterable patternSet = new PatternSet()

  RandomizedTestingTask() {

@@ -100,7 +102,7 @@ class RandomizedTestingTask extends DefaultTask {
    jvmArgs.add(argument)
  }

  void systemProperty(String property, String value) {
  void systemProperty(String property, Object value) {
    systemProperties.put(property, value)
  }

@@ -245,8 +247,8 @@ class RandomizedTestingTask extends DefaultTask {
        exclude(name: excludePattern)
      }
    }
    for (Map.Entry<String, String> prop : systemProperties) {
      sysproperty key: prop.getKey(), value: prop.getValue()
    for (Map.Entry<String, Object> prop : systemProperties) {
      sysproperty key: prop.getKey(), value: prop.getValue().toString()
    }
    makeListeners()
  }
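
Widening `systemProperties` to `Map<String, Object>` and deferring `toString()` to the loop above means values are only rendered when the ant `junit4` task is assembled, not at configuration time. A hypothetical usage sketch (task and property names are examples, not from this commit):

```groovy
integTest {
    // an Integer is now accepted directly
    systemProperty 'tests.cluster.port', 9300
    // a lazy GString is evaluated when the task runs, not when the script is configured
    systemProperty 'tests.timestamp', "${-> System.currentTimeMillis()}"
}
```
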
|
@ -27,10 +27,13 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultE
|
|||
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
|
||||
import org.gradle.logging.ProgressLogger
|
||||
import org.gradle.logging.ProgressLoggerFactory
|
||||
import org.junit.runner.Description
|
||||
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.*
|
||||
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAILURE
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK
|
||||
import static java.lang.Math.max
|
||||
|
||||
/**
|
||||
|
|
|
@@ -5,8 +5,21 @@ import com.carrotsearch.ant.tasks.junit4.Pluralize
import com.carrotsearch.ant.tasks.junit4.TestsSummaryEventListener
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.base.Strings
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe
import com.carrotsearch.ant.tasks.junit4.events.*
import com.carrotsearch.ant.tasks.junit4.events.aggregated.*
import com.carrotsearch.ant.tasks.junit4.events.EventType
import com.carrotsearch.ant.tasks.junit4.events.IEvent
import com.carrotsearch.ant.tasks.junit4.events.IStreamEvent
import com.carrotsearch.ant.tasks.junit4.events.SuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.TestFinishedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap
import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.PartialOutputEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus
import com.carrotsearch.ant.tasks.junit4.events.mirrors.FailureMirror
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.carrotsearch.ant.tasks.junit4.listeners.StackTraceFilter

@@ -15,16 +28,17 @@ import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger
import org.junit.runner.Description

import javax.sound.sampled.AudioSystem
import javax.sound.sampled.Clip
import javax.sound.sampled.Line
import javax.sound.sampled.LineEvent
import javax.sound.sampled.LineListener
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.Line;
import javax.sound.sampled.LineEvent;
import javax.sound.sampled.LineListener;

import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.*
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatTime
import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode

class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener {

@@ -0,0 +1,101 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle

import org.apache.tools.ant.BuildListener
import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskAction

import java.nio.charset.Charset

/**
 * A task which will run ant commands.
 *
 * Logging for the task is customizable for subclasses by overriding makeLogger.
 */
public abstract class AntTask extends DefaultTask {

    /**
     * A buffer that will contain the output of the ant code run,
     * if the output was not already written directly to stdout.
     */
    public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream()

    @TaskAction
    final void executeTask() {
        AntBuilder ant = new AntBuilder()

        // remove existing loggers, we add our own
        List<BuildLogger> toRemove = new ArrayList<>();
        for (BuildListener listener : ant.project.getBuildListeners()) {
            if (listener instanceof BuildLogger) {
                toRemove.add(listener);
            }
        }
        for (BuildLogger listener : toRemove) {
            ant.project.removeBuildListener(listener)
        }

        // otherwise groovy replaces System.out, and you have no chance to debug
        // ant.saveStreams = false

        final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO
        final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name())
        BuildLogger antLogger = makeLogger(stream, outputLevel)

        ant.project.addBuildListener(antLogger)
        try {
            runAnt(ant)
        } catch (Exception e) {
            // ant failed, so see if we have buffered output to emit, then rethrow the failure
            String buffer = outputBuffer.toString()
            if (buffer.isEmpty() == false) {
                logger.error("=== Ant output ===\n${buffer}")
            }
            throw e
        }
    }

    /** Runs the doAnt closure. This can be overridden by subclasses instead of having to set a closure. */
    protected abstract void runAnt(AntBuilder ant)

    /** Create the logger the ant runner will use, with the given stream for error/output. */
    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
        return new DefaultLogger(
                errorPrintStream: stream,
                outputPrintStream: stream,
                messageOutputLevel: outputLevel)
    }

    /**
     * Returns true if the ant logger should write to stdout, or false if to the buffer.
     * The default implementation writes to the buffer when gradle info logging is disabled.
     */
    protected boolean useStdout() {
        return logger.isInfoEnabled()
    }

}
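
The `LicenseHeadersTask` added later in this commit is a real subclass of this class; as a minimal sketch, a hypothetical subclass (not part of the commit) only needs to implement `runAnt`:

```groovy
package org.elasticsearch.gradle

// Hypothetical example subclass: runs a single ant <echo> command.
class EchoTask extends AntTask {
    @Override
    protected void runAnt(AntBuilder ant) {
        // any ant task can be driven against the passed-in builder
        ant.echo(message: 'hello from ant')
    }
}
```
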
@@ -18,22 +18,30 @@
 */
package org.elasticsearch.gradle

import org.gradle.process.ExecResult

import java.time.ZonedDateTime
import java.time.ZoneOffset

import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.*
import org.gradle.api.artifacts.*
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.ModuleDependency
import org.gradle.api.artifacts.ModuleVersionIdentifier
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.internal.jvm.Jvm
import org.gradle.process.ExecResult
import org.gradle.util.GradleVersion

import java.time.ZoneOffset
import java.time.ZonedDateTime

/**
 * Encapsulates build configuration for elasticsearch projects.
 */

@@ -70,15 +78,17 @@ class BuildPlugin implements Plugin<Project> {
if (project.rootProject.ext.has('buildChecksDone') == false) {
  String javaHome = findJavaHome()
  File gradleJavaHome = Jvm.current().javaHome
  String gradleJavaVersionDetails = "${System.getProperty('java.vendor')} ${System.getProperty('java.version')}" +
  String javaVendor = System.getProperty('java.vendor')
  String javaVersion = System.getProperty('java.version')
  String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" +
      " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]"

  String javaVersionDetails = gradleJavaVersionDetails
  String javaVersion = System.getProperty('java.version')
  JavaVersion javaVersionEnum = JavaVersion.current()
  if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) {
    javaVersionDetails = findJavaVersionDetails(project, javaHome)
    javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
    javaVendor = findJavaVendor(project, javaHome)
    javaVersion = findJavaVersion(project, javaHome)
  }

|
|||
println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})"
|
||||
if (gradleJavaVersionDetails != javaVersionDetails) {
|
||||
println " JDK Version (gradle) : ${gradleJavaVersionDetails}"
|
||||
println " JAVA_HOME (gradle) : ${gradleJavaHome}"
|
||||
println " JDK Version (compile) : ${javaVersionDetails}"
|
||||
println " JAVA_HOME (compile) : ${javaHome}"
|
||||
} else {
|
||||
println " JDK Version : ${gradleJavaVersionDetails}"
|
||||
println " JAVA_HOME : ${gradleJavaHome}"
|
||||
}
|
||||
|
||||
// enforce gradle version
|
||||
|
@@ -106,6 +119,25 @@ subprojects {
  throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")
}

// this block of code detecting buggy JDK 8 compiler versions can be removed when minimum Java version is incremented
assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8"
if (javaVersionEnum == JavaVersion.VERSION_1_8) {
  if (Objects.equals("Oracle Corporation", javaVendor)) {
    def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/
    if (matcher.matches()) {
      int update;
      if (matcher.group(1) == null) {
        update = 0
      } else {
        update = matcher.group(1).toInteger()
      }
      if (update < 40) {
        throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40")
      }
    }
  }
}

project.rootProject.ext.javaHome = javaHome
project.rootProject.ext.javaVersion = javaVersion
project.rootProject.ext.buildChecksDone = true
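
A sketch of how the update-parsing regex above behaves (the version strings are examples):

```groovy
def matcher = ('1.8.0_31' =~ /1\.8\.0(?:_(\d+))?/)
assert matcher.matches()
assert matcher.group(1).toInteger() == 31    // 31 < 40, so the build would fail

// a bare "1.8.0" still matches; group(1) is null, so update defaults to 0
assert ('1.8.0' =~ /1\.8\.0(?:_(\d+))?/).matches()
```
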
@@ -145,6 +177,11 @@ class BuildPlugin implements Plugin<Project> {
  return runJavascript(project, javaHome, versionScript)
}

private static String findJavaVendor(Project project, String javaHome) {
  String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));'
  return runJavascript(project, javaHome, vendorScript)
}

/** Finds the parsable java specification version */
private static String findJavaVersion(Project project, String javaHome) {
  String versionScript = 'print(java.lang.System.getProperty("java.version"));'

@@ -190,6 +227,10 @@ class BuildPlugin implements Plugin<Project> {
 * to iterate the transitive dependencies and add excludes.
 */
static void configureConfigurations(Project project) {
  // we are not shipping these jars, we act like dumb consumers of these things
  if (project.path.startsWith(':test:fixtures')) {
    return
  }
  // fail on any conflicting dependency versions
  project.configurations.all({ Configuration configuration ->
    if (configuration.name.startsWith('_transitive_')) {

@@ -197,12 +238,16 @@ class BuildPlugin implements Plugin<Project> {
      // we just have them to find *what* transitive deps exist
      return
    }
    if (configuration.name.endsWith('Fixture')) {
      // just a self contained test-fixture configuration, likely transitive and hellacious
      return
    }
    configuration.resolutionStrategy.failOnVersionConflict()
  })

  // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
  Closure disableTransitiveDeps = { ModuleDependency dep ->
    if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
    if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
      dep.transitive = false

      // also create a configuration just for this dependency version, so that later

@@ -265,6 +310,12 @@ class BuildPlugin implements Plugin<Project> {
/** Adds repositories used by ES dependencies */
static void configureRepositories(Project project) {
  RepositoryHandler repos = project.repositories
  if (System.getProperty("repos.mavenlocal") != null) {
    // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is
    // useful for development ie. bwc tests where we install stuff in the local repository
    // such that we don't have to pass hardcoded files to gradle
    repos.mavenLocal()
  }
  repos.mavenCentral()
  repos.maven {
    name 'sonatype-snapshots'

@@ -273,7 +324,7 @@ class BuildPlugin implements Plugin<Project> {
  String luceneVersion = VersionProperties.lucene
  if (luceneVersion.contains('-snapshot')) {
    // extract the revision number from the version with a regex matcher
    String revision = (luceneVersion =~ /\w+-snapshot-(\d+)/)[0][1]
    String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1]
    repos.maven {
      name 'lucene-snapshots'
      url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"
|
|||
/*
|
||||
* -path because gradle will send in paths that don't always exist.
|
||||
* -missing because we have tons of missing @returns and @param.
|
||||
* -serial because we don't use java serialization.
|
||||
*/
|
||||
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
|
||||
options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
|
||||
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
|
||||
// compile with compact 3 profile by default
|
||||
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
|
||||
if (project.compactProfile != 'full') {
|
||||
options.compilerArgs << '-profile' << project.compactProfile
|
||||
}
|
||||
options.encoding = 'UTF-8'
|
||||
//options.incremental = true
|
||||
|
||||
// gradle ignores target/source compatibility when it is "unnecessary", but since to compile with
|
||||
// java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
|
||||
assert minimumJava == JavaVersion.VERSION_1_8
|
||||
options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -310,11 +368,17 @@ class BuildPlugin implements Plugin<Project> {
static void configureJarManifest(Project project) {
  project.tasks.withType(Jar) { Jar jarTask ->
    jarTask.doFirst {
      boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT");
      String version = VersionProperties.elasticsearch;
      if (isSnapshot) {
        version = version.substring(0, version.length() - 9)
      }
      // this doFirst is added before the info plugin, therefore it will run
      // after the doFirst added by the info plugin, and we can override attributes
      jarTask.manifest.attributes(
        'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
        'X-Compile-Elasticsearch-Version': version,
        'X-Compile-Lucene-Version': VersionProperties.lucene,
        'X-Compile-Elasticsearch-Snapshot': isSnapshot,
        'Build-Date': ZonedDateTime.now(ZoneOffset.UTC),
        'Build-Java-Version': project.javaVersion)
      if (jarTask.manifest.attributes.containsKey('Change') == false) {

@@ -350,12 +414,14 @@ class BuildPlugin implements Plugin<Project> {
  // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
  systemProperty 'java.io.tmpdir', './temp'
  systemProperty 'java.awt.headless', 'true'
  systemProperty 'tests.maven', 'true' // TODO: rename this once we've switched to gradle!
  systemProperty 'tests.gradle', 'true'
  systemProperty 'tests.artifact', project.name
  systemProperty 'tests.task', path
  systemProperty 'tests.security.manager', 'true'
  systemProperty 'jna.nosys', 'true'
  // default test sysprop values
  systemProperty 'tests.ifNoTests', 'fail'
  // TODO: remove setting logging level via system property
  systemProperty 'es.logger.level', 'WARN'
  for (Map.Entry<String, String> property : System.properties.entrySet()) {
    if (property.getKey().startsWith('tests.') ||

@@ -19,9 +19,10 @@
package org.elasticsearch.gradle

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskAction
import org.gradle.internal.nativeintegration.filesystem.Chmod
import java.io.File

import javax.inject.Inject

/**

@@ -19,8 +19,9 @@
package org.elasticsearch.gradle

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import java.io.File
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.TaskAction

/**
 * Creates a file and sets its contents to something.

@@ -22,7 +22,7 @@ import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Dependency
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

@@ -51,6 +51,11 @@ public class PluginBuildPlugin extends BuildPlugin {
  project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
  project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
}

project.namingConventions {
  // Plugins declare extensions of ESIntegTestCase as "Tests" instead of IT.
  skipIntegTestInDisguise = true
}
}
createIntegTestTask(project)
createBundleTask(project)

@@ -60,15 +65,13 @@ public class PluginBuildPlugin extends BuildPlugin {
private static void configureDependencies(Project project) {
  project.dependencies {
    provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
    testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
    testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
    // we "upgrade" these optional deps to provided for plugins, since they will run
    // with a full elasticsearch server that includes optional deps
    provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
    provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
    provided "com.vividsolutions:jts:${project.versions.jts}"
    provided "com.github.spullara.mustache.java:compiler:${project.versions.mustache}"
    provided "log4j:log4j:${project.versions.log4j}"
    provided "log4j:apache-log4j-extras:${project.versions.log4j}"
    provided "org.slf4j:slf4j-api:${project.versions.slf4j}"
    provided "net.java.dev.jna:jna:${project.versions.jna}"
  }
}

@@ -108,8 +111,8 @@ public class PluginBuildPlugin extends BuildPlugin {
  include 'config/**'
  include 'bin/**'
}
from('src/site') {
  include '_site/**'
if (project.path.startsWith(':modules:') == false) {
  into('elasticsearch')
}
}
project.assemble.dependsOn(bundle)

@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.plugin

import org.gradle.api.Project
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Optional

/**
 * A container for plugin properties that will be written to the plugin descriptor, for easy

@@ -37,18 +36,9 @@ class PluginPropertiesExtension {
@Input
String description

@Input
boolean jvm = true

@Input
String classname

@Input
boolean site = false

@Input
boolean isolated = true

PluginPropertiesExtension(Project project) {
  name = project.name
  version = project.version

@@ -29,7 +29,7 @@ import org.gradle.api.tasks.Copy
class PluginPropertiesTask extends Copy {

  PluginPropertiesExtension extension
  File generatedResourcesDir = new File(project.projectDir, 'generated-resources')
  File generatedResourcesDir = new File(project.buildDir, 'generated-resources')

  PluginPropertiesTask() {
    File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')

@@ -51,33 +51,32 @@ class PluginPropertiesTask extends Copy {
if (extension.description == null) {
  throw new InvalidUserDataException('description is a required setting for esplugin')
}
if (extension.jvm && extension.classname == null) {
  throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
}
doFirst {
  if (extension.jvm && extension.isolated == false) {
    String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
    logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
  }
  if (extension.classname == null) {
    throw new InvalidUserDataException('classname is a required setting for esplugin')
  }
  // configure property substitution
  from(templateFile)
  into(generatedResourcesDir)
  expand(generateSubstitutions())
  Map<String, String> properties = generateSubstitutions()
  expand(properties)
  inputs.properties(properties)
}

Map generateSubstitutions() {
Map<String, String> generateSubstitutions() {
  def stringSnap = { version ->
    if (version.endsWith("-SNAPSHOT")) {
      return version.substring(0, version.length() - 9)
    }
    return version
  }
  return [
    'name': extension.name,
    'description': extension.description,
    'version': extension.version,
    'elasticsearchVersion': VersionProperties.elasticsearch,
    'version': stringSnap(extension.version),
    'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
    'javaVersion': project.targetCompatibility as String,
    'jvm': extension.jvm as String,
    'site': extension.site as String,
    'isolated': extension.isolated as String,
    'classname': extension.jvm ? extension.classname : 'NA'
    'classname': extension.classname
  ]
}
}
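
A sketch of the `stringSnap` closure above (the inputs are example version strings):

```groovy
def stringSnap = { version ->
    if (version.endsWith("-SNAPSHOT")) {
        return version.substring(0, version.length() - 9)   // strip the 9-char suffix
    }
    return version
}
assert stringSnap('3.0.0-SNAPSHOT') == '3.0.0'
assert stringSnap('2.2.0') == '2.2.0'   // released versions pass through
```
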
|
@ -18,7 +18,9 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.*
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.api.tasks.InputDirectory
|
||||
|
|
|
@@ -61,11 +61,14 @@ public class ForbiddenPatternsTask extends DefaultTask {
  // add mandatory rules
  patterns.put('nocommit', /nocommit/)
  patterns.put('tab', /\t/)

  inputs.property("excludes", filesFilter.excludes)
  inputs.property("rules", patterns)
}

/** Adds a file glob pattern to be excluded */
public void exclude(String... excludes) {
  this.filesFilter.exclude(excludes)
  filesFilter.exclude(excludes)
}

/** Adds a pattern to forbid. T */

@@ -21,7 +21,6 @@ package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFile
import org.gradle.api.tasks.OutputFile

/**

@@ -35,14 +34,12 @@ public class JarHellTask extends LoggedExec {
 * inputs (ie the jars/class files).
 */
@OutputFile
public File successMarker = new File(project.buildDir, 'markers/jarHell')

/** The classpath to run jarhell check on, defaults to the test runtime classpath */
@InputFile
public FileCollection classpath = project.sourceSets.test.runtimeClasspath
File successMarker = new File(project.buildDir, 'markers/jarHell')

public JarHellTask() {
  project.afterEvaluate {
    FileCollection classpath = project.sourceSets.test.runtimeClasspath
    inputs.files(classpath)
    dependsOn(classpath)
    description = "Runs CheckJarHell on ${classpath}"
    executable = new File(project.javaHome, 'bin/java')

@@ -0,0 +1,134 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.precommit

import org.apache.rat.anttasks.Report
import org.apache.rat.anttasks.SubstringLicenseMatcher
import org.apache.rat.license.SimpleLicenseFamily
import org.elasticsearch.gradle.AntTask
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.SourceSet

import java.nio.file.Files

/**
 * Checks files for license headers.
 * <p>
 * This is a port of the apache lucene check
 */
public class LicenseHeadersTask extends AntTask {

    @OutputFile
    File reportFile = new File(project.buildDir, 'reports/licenseHeaders/rat.log')

    /**
     * The list of java files to check. protected so the afterEvaluate closure in the
     * constructor can write to it.
     */
    protected List<FileCollection> javaFiles

    LicenseHeadersTask() {
        description = "Checks sources for missing, incorrect, or unacceptable license headers"
        // Delay resolving the dependencies until after evaluation so we pick up generated sources
        project.afterEvaluate {
            javaFiles = project.sourceSets.collect({it.allJava})
            inputs.files(javaFiles)
        }
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        ant.project.addTaskDefinition('ratReport', Report)
        ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher)
        ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily)

        Files.deleteIfExists(reportFile.toPath())

        // run rat, going to the file
        List<FileCollection> input = javaFiles
        ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) {
            for (FileCollection dirSet : input) {
                for (File dir: dirSet.srcDirs) {
                    // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
                    if (dir.exists()) {
                        ant.fileset(dir: dir)
                    }
                }
            }

            // BSD 4-clause stuff (is disallowed below)
            // we keep this here, in case someone adds BSD code for some reason, it should never be allowed.
            substringMatcher(licenseFamilyCategory: "BSD4 ",
                             licenseFamilyName: "Original BSD License (with advertising clause)") {
                pattern(substring: "All advertising materials")
            }

            // Apache
            substringMatcher(licenseFamilyCategory: "AL   ",
                             licenseFamilyName: "Apache") {
                // Apache license (ES)
                pattern(substring: "Licensed to Elasticsearch under one or more contributor")
                // Apache license (ASF)
                pattern(substring: "Licensed to the Apache Software Foundation (ASF) under")
                // this is the old-school one under some files
                pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")")
            }

            // Generated resources
            substringMatcher(licenseFamilyCategory: "GEN  ",
                             licenseFamilyName: "Generated") {
                // parsers generated by antlr
                pattern(substring: "ANTLR GENERATED CODE")
            }

            // approved categories
            approvedLicense(familyName: "Apache")
            approvedLicense(familyName: "Generated")
        }

        // check the license file for any errors, this should be fast.
        boolean zeroUnknownLicenses = false
        boolean foundProblemsWithFiles = false
        reportFile.eachLine('UTF-8') { line ->
            if (line.startsWith("0 Unknown Licenses")) {
                zeroUnknownLicenses = true
            }

            if (line.startsWith(" !")) {
                foundProblemsWithFiles = true
            }
        }

        if (zeroUnknownLicenses == false || foundProblemsWithFiles) {
            // print the unapproved license section, usually it's all you need to fix problems.
            int sectionNumber = 0
            reportFile.eachLine('UTF-8') { line ->
                if (line.startsWith("*******************************")) {
                    sectionNumber++
                } else {
                    if (sectionNumber == 2) {
                        logger.error(line)
                    }
                }
            }
            throw new IllegalStateException("License header problems were found! Full details: " + reportFile.absolutePath)
        }
    }
}
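Consuming builds normally get this task for free from PrecommitTasks below; a hedged sketch of wiring it manually in a build.gradle, should a project want the check outside of precommit:

    // Hypothetical manual wiring; PrecommitTasks.create(...) registers 'licenseHeaders' automatically.
    task licenseHeaders(type: org.elasticsearch.gradle.precommit.LicenseHeadersTask)
    check.dependsOn licenseHeaders   // fail `gradle check` when headers are missing or unapproved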
@@ -0,0 +1,98 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs LoggerUsageCheck on a set of directories.
 */
public class LoggerUsageTask extends LoggedExec {

    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    private File successMarker = new File(project.buildDir, 'markers/loggerUsage')

    private FileCollection classpath;

    private List<File> classDirectories;

    public LoggerUsageTask() {
        project.afterEvaluate {
            dependsOn(classpath)
            description = "Runs LoggerUsageCheck on ${classDirectories}"
            executable = new File(project.javaHome, 'bin/java')
            if (classDirectories == null) {
                classDirectories = []
                if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) {
                    classDirectories += [project.sourceSets.main.output.classesDir]
                    dependsOn project.tasks.classes
                }
                if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) {
                    classDirectories += [project.sourceSets.test.output.classesDir]
                    dependsOn project.tasks.testClasses
                }
            }
            doFirst({
                args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker')
                getClassDirectories().each {
                    args it.getAbsolutePath()
                }
            })
            doLast({
                successMarker.parentFile.mkdirs()
                successMarker.setText("", 'UTF-8')
            })
        }
    }

    @InputFiles
    FileCollection getClasspath() {
        return classpath
    }

    void setClasspath(FileCollection classpath) {
        this.classpath = classpath
    }

    @InputFiles
    List<File> getClassDirectories() {
        return classDirectories
    }

    void setClassDirectories(List<File> classDirectories) {
        this.classDirectories = classDirectories
    }

    @OutputFile
    File getSuccessMarker() {
        return successMarker
    }

    void setSuccessMarker(File successMarker) {
        this.successMarker = successMarker
    }
}
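The checker itself ships as a separate artifact, so the task needs a classpath pointing at it; a hedged sketch of manual wiring (configureLoggerUsage(...) in PrecommitTasks below does exactly this):

    configurations {
        loggerUsagePlugin
    }
    dependencies {
        // version string is illustrative
        loggerUsagePlugin "org.elasticsearch.test:logger-usage:${version}"
    }
    task loggerUsageCheck(type: org.elasticsearch.gradle.precommit.LoggerUsageTask) {
        classpath = configurations.loggerUsagePlugin
    }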
@@ -0,0 +1,90 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs NamingConventionsCheck on a classpath/directory combo to verify that
 * tests are named according to our conventions so they'll be picked up by
 * gradle. Read the Javadoc for NamingConventionsCheck to learn more.
 */
public class NamingConventionsTask extends LoggedExec {
    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    @OutputFile
    File successMarker = new File(project.buildDir, 'markers/namingConventions')

    /**
     * The classpath to run the naming conventions checks against. Must contain the files in the test
     * output directory and everything required to load those classes.
     *
     * We don't declare the actual test files as a dependency or input because if they change then
     * this will change.
     */
    @InputFiles
    FileCollection classpath = project.sourceSets.test.runtimeClasspath

    /**
     * Should we skip the integ tests in disguise tests? Defaults to false; only core names its
     * integ tests correctly, so other projects opt out explicitly.
     */
    @Input
    boolean skipIntegTestInDisguise = false

    public NamingConventionsTask() {
        dependsOn(classpath)
        description = "Runs NamingConventionsCheck on ${classpath}"
        executable = new File(project.javaHome, 'bin/java')
        onlyIf { project.sourceSets.test.output.classesDir.exists() }
        /*
         * We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
         * ready for us. Strangely neither one on their own are good enough.
         */
        project.afterEvaluate {
            doFirst {
                args('-Djna.nosys=true')
                args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
                if (skipIntegTestInDisguise) {
                    args('--skip-integ-tests-in-disguise')
                }
                /*
                 * The test framework has classes that fail the checks to validate that the checks fail properly.
                 * Since these would cause the build to fail we have to ignore them with this parameter. The
                 * process of ignoring them lets us validate that they were found so this ignore parameter acts
                 * as the test for the NamingConventionsCheck.
                 */
                if (':test:framework'.equals(project.path)) {
                    args('--self-test')
                }
                args('--', project.sourceSets.test.output.classesDir.absolutePath)
            }
        }
        doLast { successMarker.setText("", 'UTF-8') }
    }
}
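Because only core names its integ tests correctly, other projects can opt out per build script; a hedged usage sketch for the task this class backs:

    // The 'namingConventions' task itself is created by PrecommitTasks below.
    namingConventions {
        skipIntegTestInDisguise = true   // opt out until this project's integ tests are renamed
    }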
@@ -30,11 +30,15 @@ class PrecommitTasks {

    /** Adds a precommit task, which depends on non-test verification tasks. */
    public static Task create(Project project, boolean includeDependencyLicenses) {

        List<Task> precommitTasks = [
            configureForbiddenApis(project),
            configureCheckstyle(project),
            configureNamingConventions(project),
            configureLoggerUsage(project),
            project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
            project.tasks.create('jarHell', JarHellTask.class)]
            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
            project.tasks.create('jarHell', JarHellTask.class),
            project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]

        // tasks with just tests don't need dependency licenses, so this flag makes adding
        // the task optional
@@ -60,25 +64,100 @@ class PrecommitTasks {
        project.forbiddenApis {
            internalRuntimeForbidden = true
            failOnUnsupportedJava = false
            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated']
            signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')]
            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
            signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
                              getClass().getResource('/forbidden/es-all-signatures.txt')]
            suppressAnnotations = ['**.SuppressForbidden']
        }
        Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
        if (mainForbidden != null) {
            mainForbidden.configure {
                bundledSignatures += 'jdk-system-out'
                signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
                signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt')
            }
        }
        Task testForbidden = project.tasks.findByName('forbiddenApisTest')
        if (testForbidden != null) {
            testForbidden.configure {
                signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
                signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
            }
        }
        Task forbiddenApis = project.tasks.findByName('forbiddenApis')
        forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
        return forbiddenApis
    }

    private static Task configureCheckstyle(Project project) {
        // Always copy the checkstyle configuration files to 'buildDir/checkstyle' since the resources could be located in a jar
        // file. If the resources are located in a jar, Gradle will fail when it tries to turn the URL into a file
        URL checkstyleConfUrl = PrecommitTasks.getResource("/checkstyle.xml")
        URL checkstyleSuppressionsUrl = PrecommitTasks.getResource("/checkstyle_suppressions.xml")
        File checkstyleDir = new File(project.buildDir, "checkstyle")
        File checkstyleSuppressions = new File(checkstyleDir, "checkstyle_suppressions.xml")
        File checkstyleConf = new File(checkstyleDir, "checkstyle.xml");
        Task copyCheckstyleConf = project.tasks.create("copyCheckstyleConf")

        // configure inputs and outputs so up-to-date checking works properly
        copyCheckstyleConf.outputs.files(checkstyleSuppressions, checkstyleConf)
        if ("jar".equals(checkstyleConfUrl.getProtocol())) {
            JarURLConnection jarURLConnection = (JarURLConnection) checkstyleConfUrl.openConnection()
            copyCheckstyleConf.inputs.file(jarURLConnection.getJarFileURL())
        } else if ("file".equals(checkstyleConfUrl.getProtocol())) {
            copyCheckstyleConf.inputs.files(checkstyleConfUrl.getFile(), checkstyleSuppressionsUrl.getFile())
        }

        copyCheckstyleConf.doLast {
            checkstyleDir.mkdirs()
            // withStream will close the output stream and IOGroovyMethods#getBytes reads the InputStream fully and closes it
            new FileOutputStream(checkstyleConf).withStream {
                it.write(checkstyleConfUrl.openStream().getBytes())
            }
            new FileOutputStream(checkstyleSuppressions).withStream {
                it.write(checkstyleSuppressionsUrl.openStream().getBytes())
            }
        }

        Task checkstyleTask = project.tasks.create('checkstyle')
        // Apply the checkstyle plugin to create `checkstyleMain` and `checkstyleTest`. It only
        // creates them if there is main or test code to check and it makes `check` depend
        // on them. But we want `precommit` to depend on `checkstyle` which depends on them so
        // we have to swap them.
        project.pluginManager.apply('checkstyle')
        project.checkstyle {
            config = project.resources.text.fromFile(checkstyleConf, 'UTF-8')
            configProperties = [
                suppressions: checkstyleSuppressions
            ]
        }
        for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
            Task task = project.tasks.findByName(taskName)
            if (task != null) {
                project.tasks['check'].dependsOn.remove(task)
                checkstyleTask.dependsOn(task)
                task.dependsOn(copyCheckstyleConf)
                task.inputs.file(checkstyleSuppressions)
            }
        }
        return checkstyleTask
    }

    private static Task configureNamingConventions(Project project) {
        if (project.sourceSets.findByName("test")) {
            return project.tasks.create('namingConventions', NamingConventionsTask)
        }
        return null
    }

    private static Task configureLoggerUsage(Project project) {
        Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class)

        project.configurations.create('loggerUsagePlugin')
        project.dependencies.add('loggerUsagePlugin',
                "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}")

        loggerUsageTask.configure {
            classpath = project.configurations.loggerUsagePlugin
        }

        return loggerUsageTask
    }
}
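All of the above hangs off a single entry point; a hedged sketch of the call site as it might appear in a plugin's apply method (variable names are illustrative):

    // Wire all precommit checks into `gradle check`.
    Task precommit = PrecommitTasks.create(project, true /* includeDependencyLicenses */)
    project.tasks.getByName('check').dependsOn(precommit)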
@@ -0,0 +1,282 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.precommit;

import org.apache.tools.ant.BuildEvent;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildListener;
import org.apache.tools.ant.BuildLogger;
import org.apache.tools.ant.DefaultLogger;
import org.apache.tools.ant.Project;
import org.elasticsearch.gradle.AntTask;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Basic static checking to keep tabs on third party JARs
 */
public class ThirdPartyAuditTask extends AntTask {

    // patterns for classes to exclude, because we understand their issues
    private List<String> excludes = [];

    /**
     * Input for the task. See javadoc for {@link getJars} for more. Protected
     * so the afterEvaluate closure in the constructor can write it.
     */
    protected FileCollection jars;

    /**
     * Classpath against which to run the third party audit. Protected so the
     * afterEvaluate closure in the constructor can write it.
     */
    protected FileCollection classpath;

    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    @OutputFile
    File successMarker = new File(project.buildDir, 'markers/thirdPartyAudit')

    ThirdPartyAuditTask() {
        // we depend on this because it's the only reliable configuration
        // this probably makes the build slower: gradle you suck here when it comes to configurations, you pay the price.
        dependsOn(project.configurations.testCompile);
        description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors";

        project.afterEvaluate {
            Configuration configuration = project.configurations.findByName('runtime');
            if (configuration == null) {
                // some projects apparently do not have 'runtime'? what a nice inconsistency,
                // basically only serves to waste time in build logic!
                configuration = project.configurations.findByName('testCompile');
            }
            assert configuration != null;
            classpath = configuration

            // we only want third party dependencies.
            jars = configuration.fileCollection({ dependency ->
                dependency.group.startsWith("org.elasticsearch") == false
            });

            // we don't want provided dependencies, which we have already scanned. e.g. don't
            // scan ES core's dependencies for every single plugin
            Configuration provided = project.configurations.findByName('provided')
            if (provided != null) {
                jars -= provided
            }
            inputs.files(jars)
            onlyIf { jars.isEmpty() == false }
        }
    }

    /**
     * classes that should be excluded from the scan,
     * e.g. because we know what sheisty stuff those particular classes are up to.
     */
    public void setExcludes(String[] classes) {
        for (String s : classes) {
            if (s.indexOf('*') != -1) {
                throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!");
            }
        }
        excludes = classes.sort();
    }

    /**
     * Returns current list of exclusions.
     */
    @Input
    public List<String> getExcludes() {
        return excludes;
    }

    // yes, we parse Uwe Schindler's errors to find missing classes, and to keep a continuous audit. Just don't let him know!
    static final Pattern MISSING_CLASS_PATTERN =
        Pattern.compile(/WARNING: The referenced class '(.*)' cannot be loaded\. Please fix the classpath\!/);

    static final Pattern VIOLATION_PATTERN =
        Pattern.compile(/\s\sin ([a-zA-Z0-9\$\.]+) \(.*\)/);

    // we log everything and capture errors and handle them with our whitelist
    // this is important, as we detect stale whitelist entries, workaround forbidden apis bugs,
    // and it also allows whitelisting missing classes!
    static class EvilLogger extends DefaultLogger {
        final Set<String> missingClasses = new TreeSet<>();
        final Map<String,List<String>> violations = new TreeMap<>();
        String previousLine = null;

        @Override
        public void messageLogged(BuildEvent event) {
            if (event.getTask().getClass() == de.thetaphi.forbiddenapis.ant.AntTask.class) {
                if (event.getPriority() == Project.MSG_WARN) {
                    Matcher m = MISSING_CLASS_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        missingClasses.add(m.group(1).replace('.', '/') + ".class");
                    }
                } else if (event.getPriority() == Project.MSG_ERR) {
                    Matcher m = VIOLATION_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        String violation = previousLine + '\n' + event.getMessage();
                        String clazz = m.group(1).replace('.', '/') + ".class";
                        List<String> current = violations.get(clazz);
                        if (current == null) {
                            current = new ArrayList<>();
                            violations.put(clazz, current);
                        }
                        current.add(violation);
                    }
                    previousLine = event.getMessage();
                }
            }
            super.messageLogged(event);
        }
    }

    @Override
    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
        DefaultLogger log = new EvilLogger();
        log.errorPrintStream = stream;
        log.outputPrintStream = stream;
        log.messageOutputLevel = outputLevel;
        return log;
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask);

        // print which jars we are going to scan, always
        // this is not the time to try to be succinct! Forbidden will print plenty on its own!
        Set<String> names = new TreeSet<>();
        for (File jar : jars) {
            names.add(jar.getName());
        }

        // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
        // and then remove our temp dir afterwards. don't complain: try it yourself.
        // we don't use gradle temp dir handling, just google it, or try it yourself.

        File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit');

        // clean up any previous mess (if we failed), then unzip everything to one directory
        ant.delete(dir: tmpDir.getAbsolutePath());
        tmpDir.mkdirs();
        for (File jar : jars) {
            ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath());
        }

        // convert exclusion class names to binary file names
        List<String> excludedFiles = excludes.collect {it.replace('.', '/') + ".class"}
        Set<String> excludedSet = new TreeSet<>(excludedFiles);

        // jarHellReprise
        Set<String> sheistySet = getSheistyClasses(tmpDir.toPath());

        try {
            ant.thirdPartyAudit(internalRuntimeForbidden: false,
                            failOnUnsupportedJava: false,
                            failOnMissingClasses: false,
                            signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
                            classpath: classpath.asPath) {
                fileset(dir: tmpDir)
            }
        } catch (BuildException ignore) {}

        EvilLogger evilLogger = null;
        for (BuildListener listener : ant.project.getBuildListeners()) {
            if (listener instanceof EvilLogger) {
                evilLogger = (EvilLogger) listener;
                break;
            }
        }
        assert evilLogger != null;

        // keep our whitelist up to date
        Set<String> bogusExclusions = new TreeSet<>(excludedSet);
        bogusExclusions.removeAll(sheistySet);
        bogusExclusions.removeAll(evilLogger.missingClasses);
        bogusExclusions.removeAll(evilLogger.violations.keySet());
        if (!bogusExclusions.isEmpty()) {
            throw new IllegalStateException("Invalid exclusions, nothing is wrong with these classes: " + bogusExclusions);
        }

        // don't duplicate classes with the JDK
        sheistySet.removeAll(excludedSet);
        if (!sheistySet.isEmpty()) {
            throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
        }

        // don't allow a broken classpath
        evilLogger.missingClasses.removeAll(excludedSet);
        if (!evilLogger.missingClasses.isEmpty()) {
            throw new IllegalStateException("CLASSES ARE MISSING! " + evilLogger.missingClasses);
        }

        // don't use internal classes
        evilLogger.violations.keySet().removeAll(excludedSet);
        if (!evilLogger.violations.isEmpty()) {
            throw new IllegalStateException("VIOLATIONS WERE FOUND! " + evilLogger.violations);
        }

        // clean up our mess (if we succeed)
        ant.delete(dir: tmpDir.getAbsolutePath());

        successMarker.setText("", 'UTF-8')
    }

    /**
     * check for sheisty classes: if they also exist in the extensions classloader, it's jar hell with the jdk!
     */
    private Set<String> getSheistyClasses(Path root) {
        // system.parent = extensions loader.
        // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
        // but groovy/gradle needs to work at all first!
        ClassLoader ext = ClassLoader.getSystemClassLoader().getParent();
        assert ext != null;

        Set<String> sheistySet = new TreeSet<>();
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                String entry = root.relativize(file).toString().replace('\\', '/');
                if (entry.endsWith(".class")) {
                    if (ext.getResource(entry) != null) {
                        sheistySet.add(entry);
                    }
                }
                return FileVisitResult.CONTINUE;
            }
        });
        return sheistySet;
    }
}
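Exclusions must name classes exactly, since the setter above rejects wildcards and runAnt() fails the build on stale entries; a hedged build-script sketch with an illustrative class name:

    thirdPartyAudit.excludes = [
        // each entry must be a fully qualified class name; wildcards are rejected
        'org.example.internal.UnsafeHelper',
    ]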
@@ -33,10 +33,16 @@ class ClusterConfiguration {
    int numNodes = 1

    @Input
    int baseHttpPort = 9400
    int numBwcNodes = 0

    @Input
    int baseTransportPort = 9500
    String bwcVersion = null

    @Input
    int httpPort = 0

    @Input
    int transportPort = 0

    @Input
    boolean daemonize = true
@@ -47,6 +53,15 @@ class ClusterConfiguration {
    @Input
    String jvmArgs = System.getProperty('tests.jvm.argline', '')

    /**
     * The seed node's ports file. In the case the cluster has more than one node we use a seed node
     * to form the cluster. The file is null if there is no seed node yet available.
     *
     * Note: this can only be null if the cluster has only one node or if the first node is not yet
     * configured. All nodes but the first node should see a non-null value.
     */
    File seedNodePortsFile

    /**
     * A closure to call before the cluster is considered ready. The closure is passed the node info,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
@@ -55,7 +70,7 @@ class ClusterConfiguration {
    @Input
    Closure waitCondition = { NodeInfo node, AntBuilder ant ->
        File tmpFile = new File(node.cwd, 'wait.success')
        ant.get(src: "http://localhost:${node.httpPort()}",
        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                retries: 10)
@@ -117,4 +132,12 @@ class ClusterConfiguration {
        }
        extraConfigFiles.put(path, sourceFile)
    }

    /** Returns an address and port suitable for a uri to connect to this cluster's seed node over the transport protocol */
    String seedNodeTransportUri() {
        if (seedNodePortsFile != null) {
            return seedNodePortsFile.readLines("UTF-8").get(0)
        }
        return null;
    }
}
@@ -23,11 +23,18 @@ import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.api.*
import org.gradle.api.AntBuilder
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec

import java.nio.file.Paths

@@ -38,40 +45,85 @@ class ClusterFormationTasks {

    /**
     * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
     *
     * Returns a NodeInfo object for the first node in the cluster.
     */
    static void setup(Project project, Task task, ClusterConfiguration config) {
    static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
        if (task.getEnabled() == false) {
            // no need to add cluster formation tasks if the task won't run!
            return
        }
        configureDistributionDependency(project, config.distribution)
        List<Task> startTasks = []
        File sharedDir = new File(project.buildDir, "cluster/shared")
        // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
        // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
        // such that snapshots survive failures / test runs and there is no simple way today to fix that.
        Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
            delete sharedDir
            doLast {
                sharedDir.mkdirs()
            }
        }
        List<Task> startTasks = [cleanup]
        List<NodeInfo> nodes = []
        if (config.numNodes < config.numBwcNodes) {
            throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
        }
        if (config.numBwcNodes > 0 && config.bwcVersion == null) {
            throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
        }
        // this is our current version distribution configuration we use for all kinds of REST tests etc.
        project.configurations {
            elasticsearchDistro
        }
        configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
        if (config.bwcVersion != null && config.numBwcNodes > 0) {
            // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
            // this version uses the same distribution etc. and only differs in the version we depend on.
            // from here on everything else works the same as if it's the current version, we fetch the BWC version
            // from mirrors using gradle's built-in mechanism etc.
            project.configurations {
                elasticsearchBwcDistro
            }
            configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
        }

        for (int i = 0; i < config.numNodes; ++i) {
            NodeInfo node = new NodeInfo(config, i, project, task)
            // we start N nodes and out of these N nodes there might be M bwc nodes.
            // for each of those nodes we might have a different configuration
            String elasticsearchVersion = VersionProperties.elasticsearch
            Configuration configuration = project.configurations.elasticsearchDistro
            if (i < config.numBwcNodes) {
                elasticsearchVersion = config.bwcVersion
                configuration = project.configurations.elasticsearchBwcDistro
            }
            NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
            if (i == 0) {
                if (config.seedNodePortsFile != null) {
                    // we might allow this in the future to be set but for now we are the only authority to set this!
                    throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized")
                }
                config.seedNodePortsFile = node.transportPortsFile;
            }
            nodes.add(node)
            startTasks.add(configureNode(project, task, node))
            startTasks.add(configureNode(project, task, cleanup, node, configuration))
        }

        Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
        task.dependsOn(wait)

        // delay the resolution of the uri by wrapping in a closure, so it is not used until ready for tests
        return nodes[0]
    }

    /** Adds a dependency on the given distribution */
    static void configureDistributionDependency(Project project, String distro) {
        String elasticsearchVersion = VersionProperties.elasticsearch
    static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
        String packaging = distro
        if (distro == 'tar') {
            packaging = 'tar.gz'
        } else if (distro == 'integ-test-zip') {
            packaging = 'zip'
        }
        project.configurations {
            elasticsearchDistro
        }
        project.dependencies {
            elasticsearchDistro "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}"
        }
        project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}")
    }

    /**
@@ -91,10 +143,10 @@ class ClusterFormationTasks {
     *
     * @return a task which starts the node.
     */
    static Task configureNode(Project project, Task task, NodeInfo node) {
    static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) {

        // tasks are chained so their execution order is maintained
        Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: task.dependsOn.collect()) {
        Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
            delete node.homeDir
            delete node.cwd
            doLast {
@@ -103,7 +155,7 @@ class ClusterFormationTasks {
        }
        setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
        setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
        setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node)
        setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
        setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
        setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
        setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
@@ -139,27 +191,28 @@ class ClusterFormationTasks {
    }

    /** Adds a task to extract the elasticsearch distribution */
    static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
        List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
        /* project.configurations.elasticsearchDistro.singleFile will be an
          external artifact if this is being run by a plugin not living in the
          elasticsearch source tree. If this is a plugin built in the
          elasticsearch source tree or this is a distro in the elasticsearch
          source tree then this should be the version of elasticsearch built
          by the source tree. If it isn't then Bad Things(TM) will happen. */
    static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) {
        List extractDependsOn = [configuration, setup]
        /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the
          elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in
          the elasticsearch source tree then this should be the version of elasticsearch built by the source tree.
          If it isn't then Bad Things(TM) will happen. */
        Task extract

        switch (node.config.distribution) {
            case 'integ-test-zip':
            case 'zip':
                extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
                    from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
                    from {
                        project.zipTree(configuration.singleFile)
                    }
                    into node.baseDir
                }
                break;
            case 'tar':
                extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
                    from {
                        project.tarTree(project.resources.gzip(project.configurations.elasticsearchDistro.singleFile))
                        project.tarTree(project.resources.gzip(configuration.singleFile))
                    }
                    into node.baseDir
                }
@@ -168,7 +221,7 @@ class ClusterFormationTasks {
                File rpmDatabase = new File(node.baseDir, 'rpm-database')
                File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
                /* Delay reading the location of the rpm file until task execution */
                Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
                Object rpm = "${ -> configuration.singleFile}"
                extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
                    commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
                        '--dbpath', rpmDatabase,
@@ -183,7 +236,7 @@ class ClusterFormationTasks {
            case 'deb':
                /* Delay reading the location of the deb file until task execution */
                File debExtracted = new File(node.baseDir, 'deb-extracted')
                Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
                Object deb = "${ -> configuration.singleFile}"
                extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
                    commandLine 'dpkg-deb', '-x', deb, debExtracted
                    doFirst {
@@ -200,21 +253,30 @@ class ClusterFormationTasks {
    /** Adds a task to write elasticsearch.yml for the given node configuration */
    static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
        Map esConfig = [
                'cluster.name'                    : node.clusterName,
                'http.port'                       : node.httpPort(),
                'transport.tcp.port'              : node.transportPort(),
                'pidfile'                         : node.pidFile,
                'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.baseTransportPort + it}"}.join(','),
                'path.repo'                       : "${node.homeDir}/repo",
                'path.shared_data'                : "${node.homeDir}/../",
                // Define a node attribute so we can test that it exists
                'node.testattr'                   : 'test',
                'repositories.url.allowed_urls'   : 'http://snapshot.test*'
                'cluster.name'                 : node.clusterName,
                'pidfile'                      : node.pidFile,
                'path.repo'                    : "${node.sharedDir}/repo",
                'path.shared_data'             : "${node.sharedDir}/",
                // Define a node attribute so we can test that it exists
                'node.attr.testattr'           : 'test',
                'repositories.url.allowed_urls': 'http://snapshot.test*'
        ]
        esConfig['http.port'] = node.config.httpPort
        esConfig['transport.tcp.port'] = node.config.transportPort
        esConfig.putAll(node.config.settings)

        Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
        writeConfig.doFirst {
            if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
                ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
                    resourceexists {
                        file(file: node.config.seedNodePortsFile.toString())
                    }
                }
                // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
                // host and join the cluster via that.
                esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
            }
            File configFile = new File(node.confDir, 'elasticsearch.yml')
            logger.info("Configuring ${configFile}")
            configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
@@ -226,7 +288,8 @@ class ClusterFormationTasks {
            return setup
        }
        Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
        copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
        File configDir = new File(node.homeDir, 'config')
        copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
        for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
            copyConfig.doFirst {
                // make sure the copy won't be a no-op or act on a directory
@@ -239,9 +302,12 @@ class ClusterFormationTasks {
                }
            }
            File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
            copyConfig.into(destConfigFile.canonicalFile.parentFile)
                      .from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
                      .rename { destConfigFile.name }
            // wrap source file in closure to delay resolution to execution time
            copyConfig.from({ extraConfigFile.getValue() }) {
                // this must be in a closure so it is only applied to the single file specified in from above
                into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
                rename { destConfigFile.name }
            }
        }
        return copyConfig
    }
@@ -321,10 +387,26 @@ class ClusterFormationTasks {
        }
        // delay reading the file location until execution time by wrapping in a closure within a GString
        String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
        Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
        Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
        return configureExecTask(name, project, setup, node, args)
    }

    /** Wrapper for a command line argument: surrounds arguments containing a comma with double quotes **/
    private static class EscapeCommaWrapper {

        Object arg

        public String toString() {
            String s = arg.toString()

            /// Surround strings that contain a comma with double quotes
            if (s.indexOf(',') != -1) {
                return "\"${s}\""
            }
            return s
        }
    }

    /** Adds a task to execute a command to help setup the cluster */
    static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
        return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) {
@@ -332,10 +414,13 @@ class ClusterFormationTasks {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                executable 'cmd'
                args '/C', 'call'
                // On Windows the comma character is considered a parameter separator:
                // arguments are wrapped in an EscapeCommaWrapper that escapes commas
                args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
            } else {
                executable 'sh'
                args execArgs
            }
            args execArgs
        }
    }

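A quick behavioral sketch of the wrapper (illustrative values; the class is private to ClusterFormationTasks, so this is conceptual rather than runnable from a build script):

    // Arguments without commas pass through untouched; those with commas get quoted,
    // since Windows cmd treats an unquoted comma as a parameter separator.
    assert new EscapeCommaWrapper(arg: 'discovery.type=none').toString() == 'discovery.type=none'
    assert new EscapeCommaWrapper(arg: 'a,b,c').toString() == '"a,b,c"'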
@@ -366,7 +451,7 @@ class ClusterFormationTasks {
        // gradle task options are not processed until the end of the configuration phase
        if (node.config.debug) {
            println 'Running elasticsearch in debug mode, suspending until connected on port 8000'
            node.env['JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
            node.env['ES_JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
        }

        node.getCommandString().eachLine { line -> logger.info(line) }
@@ -400,7 +485,12 @@ class ClusterFormationTasks {
                    resourceexists {
                        file(file: node.pidFile.toString())
                    }
                    socket(server: '127.0.0.1', port: node.httpPort())
                    resourceexists {
                        file(file: node.httpPortsFile.toString())
                    }
                    resourceexists {
                        file(file: node.transportPortsFile.toString())
                    }
                }
            }
        }
@@ -444,6 +534,8 @@ class ClusterFormationTasks {
        logger.error("|-----------------------------------------")
        logger.error("| failure marker exists: ${node.failedMarker.exists()}")
        logger.error("| pid file exists: ${node.pidFile.exists()}")
        logger.error("| http ports file exists: ${node.httpPortsFile.exists()}")
        logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}")
        // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
        logger.error("|\n| [ant output]")
        node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
@@ -0,0 +1,287 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.test

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.AntTask
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.Input

/**
 * A fixture for integration tests which runs in a separate process.
 */
public class Fixture extends AntTask {

    /** The path to the executable that starts the fixture. */
    @Input
    String executable

    private final List<Object> arguments = new ArrayList<>()

    @Input
    public void args(Object... args) {
        arguments.addAll(args)
    }

    /**
     * Environment variables for the fixture process. The value can be any object, which
     * will have toString() called at execution time.
     */
    private final Map<String, Object> environment = new HashMap<>()

    @Input
    public void env(String key, Object value) {
        environment.put(key, value)
    }

    /** A flag to indicate whether the command should be executed from a shell. */
    @Input
    boolean useShell = false

    /**
     * A flag to indicate whether the fixture should be run in the foreground, or spawned.
     * It is protected so subclasses can override (eg RunTask).
     */
    protected boolean spawn = true

    /**
     * A closure to call before the fixture is considered ready. The closure is passed the fixture object,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
     * condition is for http on the http port.
     */
    @Input
    Closure waitCondition = { Fixture fixture, AntBuilder ant ->
        File tmpFile = new File(fixture.cwd, 'wait.success')
        ant.get(src: "http://${fixture.addressAndPort}",
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging information can be flushed
                retries: 10)
        return tmpFile.exists()
    }

    /** A task which will stop this fixture. This should be used as a finalizedBy for any tasks that use the fixture. */
    public final Task stopTask

    public Fixture() {
        stopTask = createStopTask()
        finalizedBy(stopTask)
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        project.delete(baseDir) // reset everything
        cwd.mkdirs()
        final String realExecutable
        final List<Object> realArgs = new ArrayList<>()
        final Map<String, Object> realEnv = environment
        // We need to choose which executable we are using. In shell mode, or when we
        // are spawning and thus using the wrapper script, the executable is the shell.
        if (useShell || spawn) {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                realExecutable = 'cmd'
                realArgs.add('/C')
                realArgs.add('"') // quote the entire command
            } else {
                realExecutable = 'sh'
            }
        } else {
            realExecutable = executable
            realArgs.addAll(arguments)
        }
        if (spawn) {
            writeWrapperScript(executable)
            realArgs.add(wrapperScript)
            realArgs.addAll(arguments)
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS) && (useShell || spawn)) {
            realArgs.add('"')
        }
        commandString.eachLine { line -> logger.info(line) }

        ant.exec(executable: realExecutable, spawn: spawn, dir: cwd, taskname: name) {
            realEnv.each { key, value -> env(key: key, value: value) }
            realArgs.each { arg(value: it) }
        }

        String failedProp = "failed${name}"
        // first wait for resources, or the failure marker from the wrapper script
        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
            or {
                resourceexists {
                    file(file: failureMarker.toString())
                }
                and {
                    resourceexists {
                        file(file: pidFile.toString())
                    }
                    resourceexists {
                        file(file: portsFile.toString())
                    }
                }
            }
        }

        if (ant.project.getProperty(failedProp) || failureMarker.exists()) {
            fail("Failed to start ${name}")
        }

        // the process is started (has a pid) and is bound to a network interface
        // so now wait until the waitCondition has been met
        // TODO: change this to a loop?
        boolean success
        try {
            success = waitCondition(this, ant)
        } catch (Exception e) {
            String msg = "Wait condition caught exception for ${name}"
            logger.error(msg, e)
            fail(msg, e)
        }
        if (success == false) {
            fail("Wait condition failed for ${name}")
        }
    }

    /** Returns a debug string used to log information about how the fixture was run. */
    protected String getCommandString() {
        String commandString = "\n${name} configuration:\n"
        commandString += "-----------------------------------------\n"
        commandString += "  cwd: ${cwd}\n"
        commandString += "  command: ${executable} ${arguments.join(' ')}\n"
        commandString += '  environment:\n'
        environment.each { k, v -> commandString += "    ${k}: ${v}\n" }
        if (spawn) {
            commandString += "\n  [${wrapperScript.name}]\n"
            wrapperScript.eachLine('UTF-8', { line -> commandString += "    ${line}\n"})
        }
        return commandString
    }

    /**
     * Writes a script to run the real executable, so that stdout/stderr can be captured.
     * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process
     */
    private void writeWrapperScript(String executable) {
        wrapperScript.parentFile.mkdirs()
        String argsPasser = '"$@"'
        String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            argsPasser = '%*'
            exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
        }
        wrapperScript.setText("\"${executable}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

    /** Fail the build with the given message, logging relevant info. */
    private void fail(String msg, Exception... suppressed) {
        if (logger.isInfoEnabled() == false) {
            // We already log the command at info level. No need to do it twice.
            commandString.eachLine { line -> logger.error(line) }
        }
        logger.error("${name} output:")
        logger.error("-----------------------------------------")
        logger.error("  failure marker exists: ${failureMarker.exists()}")
        logger.error("  pid file exists: ${pidFile.exists()}")
        logger.error("  ports file exists: ${portsFile.exists()}")
        // also dump the log file for the startup script (which will include ES logging output to stdout)
        if (runLog.exists()) {
            logger.error("\n  [log]")
            runLog.eachLine { line -> logger.error("  ${line}") }
        }
        logger.error("-----------------------------------------")
        GradleException toThrow = new GradleException(msg)
        for (Exception e : suppressed) {
            toThrow.addSuppressed(e)
        }
        throw toThrow
    }

    /** Adds a task to kill an elasticsearch node with the given pidfile */
    private Task createStopTask() {
        final Fixture fixture = this
        final Object pid = "${ -> fixture.pid }"
        Exec stop = project.tasks.create(name: "${name}#stop", type: LoggedExec)
        stop.onlyIf { fixture.pidFile.exists() }
        stop.doFirst {
            logger.info("Shutting down ${fixture.name} with pid ${pid}")
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            stop.executable = 'Taskkill'
            stop.args('/PID', pid, '/F')
        } else {
            stop.executable = 'kill'
            stop.args('-9', pid)
        }
        stop.doLast {
            project.delete(fixture.pidFile)
        }
        return stop
    }

    /**
     * A path relative to the build dir that all configuration and runtime files
     * will live in for this fixture
     */
    protected File getBaseDir() {
        return new File(project.buildDir, "fixtures/${name}")
    }

    /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */
    protected File getCwd() {
        return new File(baseDir, 'cwd')
    }

    /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */
    protected File getPidFile() {
        return new File(baseDir, 'pid')
    }

    /** Reads the pid file and returns the process' pid */
    public int getPid() {
        return Integer.parseInt(pidFile.getText('UTF-8').trim())
    }

    /** Returns the file the process writes its bound ports to. Defaults to "ports" inside baseDir. */
    protected File getPortsFile() {
        return new File(baseDir, 'ports')
    }

    /** Returns an address and port suitable for a uri to connect to this node over http */
    public String getAddressAndPort() {
        return portsFile.readLines("UTF-8").get(0)
    }

    /** Returns a file that wraps around the actual command when {@code spawn == true}. */
    protected File getWrapperScript() {
        return new File(cwd, Os.isFamily(Os.FAMILY_WINDOWS) ? 'run.bat' : 'run')
    }

    /** Returns a file that the wrapper script writes when the command failed. */
    protected File getFailureMarker() {
        return new File(cwd, 'run.failed')
    }

    /** Returns the log file the wrapper script captures the command's stdout/stderr into. */
    protected File getRunLog() {
        return new File(cwd, 'run.log')
    }
}

@@ -19,7 +19,6 @@
package org.elasticsearch.gradle.test

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task

@@ -40,9 +39,18 @@ class NodeInfo {
    /** root directory all node files and operations happen under */
    File baseDir

    /** shared data directory all nodes share */
    File sharedDir

    /** the pid file the node will use */
    File pidFile

    /** a file written by elasticsearch containing the ports of each bound address for http */
    File httpPortsFile

    /** a file written by elasticsearch containing the ports of each bound address for transport */
    File transportPortsFile

    /** elasticsearch home dir */
    File homeDir

@@ -83,15 +91,20 @@ class NodeInfo {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream()

    /** Creates a node to run as part of a cluster for the given task */
    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) {
    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
        this.config = config
        this.nodeNum = nodeNum
        this.sharedDir = sharedDir
        clusterName = "${task.path.replace(':', '_').substring(1)}"
        baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
        pidFile = new File(baseDir, 'es.pid')
        homeDir = homeDir(baseDir, config.distribution)
        confDir = confDir(baseDir, config.distribution)
        homeDir = homeDir(baseDir, config.distribution, nodeVersion)
        confDir = confDir(baseDir, config.distribution, nodeVersion)
        configFile = new File(confDir, 'elasticsearch.yml')
        // even for rpm/deb, the logs are under home because we don't start with real services
        File logsDir = new File(homeDir, 'logs')
        httpPortsFile = new File(logsDir, 'http.ports')
        transportPortsFile = new File(logsDir, 'transport.ports')
        cwd = new File(baseDir, "cwd")
        failedMarker = new File(cwd, 'run.failed')
        startLog = new File(cwd, 'run.log')

@@ -115,17 +128,19 @@ class NodeInfo {
            args.add("${esScript}")
        }

        env = [
            'JAVA_HOME' : project.javaHome,
            'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
        ]
        args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
        env = [ 'JAVA_HOME' : project.javaHome ]
        args.addAll("-E", "es.node.portsfile=true")
        String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
        String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
        env.put('ES_JAVA_OPTS', esJavaOpts)
        for (Map.Entry<String, String> property : System.properties.entrySet()) {
            if (property.getKey().startsWith('es.')) {
                args.add("-D${property.getKey()}=${property.getValue()}")
                args.add("-E")
                args.add("${property.getKey()}=${property.getValue()}")
            }
        }
        args.add("-Des.path.conf=${confDir}")
        env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
        args.addAll("-E", "es.path.conf=${confDir}")
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            args.add('"') // end the entire command, quoted
        }

@@ -159,24 +174,24 @@ class NodeInfo {
        wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

    /** Returns the http port for this node */
    int httpPort() {
        return config.baseHttpPort + nodeNum
    /** Returns an address and port suitable for a uri to connect to this node over http */
    String httpUri() {
        return httpPortsFile.readLines("UTF-8").get(0)
    }

    /** Returns the transport port for this node */
    int transportPort() {
        return config.baseTransportPort + nodeNum
    /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
    String transportUri() {
        return transportPortsFile.readLines("UTF-8").get(0)
    }

    /** Returns the directory elasticsearch home is contained in for the given distribution */
    static File homeDir(File baseDir, String distro) {
    static File homeDir(File baseDir, String distro, String nodeVersion) {
        String path
        switch (distro) {
            case 'integ-test-zip':
            case 'zip':
            case 'tar':
                path = "elasticsearch-${VersionProperties.elasticsearch}"
                path = "elasticsearch-${nodeVersion}"
                break
            case 'rpm':
            case 'deb':

@@ -188,12 +203,12 @@ class NodeInfo {
        return new File(baseDir, path)
    }

    static File confDir(File baseDir, String distro) {
    static File confDir(File baseDir, String distro, String nodeVersion) {
        switch (distro) {
            case 'integ-test-zip':
            case 'zip':
            case 'tar':
                return new File(homeDir(baseDir, distro), 'config')
                return new File(homeDir(baseDir, distro, nodeVersion), 'config')
            case 'rpm':
            case 'deb':
                return new File(baseDir, "${distro}-extracted/etc/elasticsearch")

@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin

@@ -57,12 +56,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
        RestSpecHack.configureDependencies(project)
        project.afterEvaluate {
            dependsOn(RestSpecHack.configureTask(project, includePackaged))
            systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
        }
        // this must run after all projects have been configured, so we know any project
        // references can be accessed as fully configured
        project.gradle.projectsEvaluated {
            ClusterFormationTasks.setup(project, this, clusterConfig)
            NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
            systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
            // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
            // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
            // both as separate sysprops
            systemProperty('tests.cluster', "${-> node.transportUri()}")
        }
    }

@@ -82,4 +85,25 @@
    public ClusterConfiguration getCluster() {
        return clusterConfig
    }

    @Override
    public Task dependsOn(Object... dependencies) {
        super.dependsOn(dependencies)
        for (Object dependency : dependencies) {
            if (dependency instanceof Fixture) {
                finalizedBy(((Fixture)dependency).stopTask)
            }
        }
        return this
    }

    @Override
    public void setDependsOn(Iterable<?> dependencies) {
        super.setDependsOn(dependencies)
        for (Object dependency : dependencies) {
            if (dependency instanceof Fixture) {
                finalizedBy(((Fixture)dependency).stopTask)
            }
        }
    }
}

@@ -1,14 +1,13 @@
package org.elasticsearch.gradle.test

import org.gradle.api.DefaultTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.util.ConfigureUtil

public class RunTask extends DefaultTask {

    ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
    ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)

    public RunTask() {
        description = "Runs elasticsearch with '${project.path}'"

@@ -27,7 +27,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.plugins.ide.eclipse.model.EclipseClasspath

/** Configures the build to have a rest integration test. */
public class StandaloneTestBasePlugin implements Plugin<Project> {

@@ -42,7 +41,7 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {

        // only setup tests to build
        project.sourceSets.create('test')
        project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
        project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

        project.eclipse.classpath.sourceSets = [project.sourceSets.test]
        project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
|
|||
]
|
||||
RandomizedTestingTask test = project.tasks.create(testOptions)
|
||||
test.configure(BuildPlugin.commonTestConfig(project))
|
||||
BuildPlugin.configureCompile(project)
|
||||
test.classpath = project.sourceSets.test.runtimeClasspath
|
||||
test.testClassesDir project.sourceSets.test.output.classesDir
|
||||
test.mustRunAfter(project.precommit)
|
||||
|
|
|
@ -19,8 +19,7 @@
|
|||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.tasks.*
|
||||
import org.gradle.logging.ProgressLogger
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.logging.ProgressLoggerFactory
|
||||
import org.gradle.process.internal.ExecAction
|
||||
import org.gradle.process.internal.ExecActionFactory
|
||||
|
|
|
@ -19,8 +19,7 @@
|
|||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.tasks.*
|
||||
import org.gradle.logging.ProgressLogger
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.logging.ProgressLoggerFactory
|
||||
import org.gradle.process.internal.ExecAction
|
||||
import org.gradle.process.internal.ExecActionFactory
|
||||
|
|
|

@@ -0,0 +1,60 @@
<?xml version="1.0"?>
<!DOCTYPE module PUBLIC
    "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
    "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">

<module name="Checker">
  <property name="charset" value="UTF-8" />

  <module name="SuppressionFilter">
    <property name="file" value="${suppressions}" />
  </module>

  <module name="TreeWalker">
    <!-- It's our official line length! See checkstyle_suppressions.xml for the files that don't pass this. For now we
      suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
      unfair. -->
    <module name="LineLength">
      <property name="max" value="140"/>
    </module>

    <module name="AvoidStarImport" />
    <!-- Doesn't pass but we could make it pass pretty quickly.
    <module name="UnusedImports">
      The next property is optional. If we remove it then imports that are
      only referenced by Javadoc cause the check to fail.
      <property name="processJavadoc" value="true" />
    </module>
    -->

    <!-- Non-inner classes must be in files that match their names. -->
    <module name="OuterTypeFilename" />
    <!-- No line wraps inside of import and package statements. -->
    <module name="NoLineWrap" />
    <!-- Each java file has only one outer class -->
    <module name="OneTopLevelClass" />
    <!-- The suffix L is preferred, because the letter l (ell) is often
      hard to distinguish from the digit 1 (one). -->
    <module name="UpperEll"/>

    <module name="EqualsHashCode" />

    <!-- We don't use Java's builtin serialization and we suppress all warnings
      about it. The flip side of that coin is that we shouldn't _try_ to use
      it. We can't outright ban it with ForbiddenApis because it complains about
      every reference to a class that implements Serializable, like String or
      Exception.
    -->
    <module name="RegexpSinglelineJava">
      <property name="format" value="serialVersionUID" />
      <property name="message" value="Do not declare serialVersionUID." />
      <property name="ignoreComments" value="true" />
    </module>
    <module name="RegexpSinglelineJava">
      <property name="format" value="java\.io\.Serializable" />
      <property name="message" value="References java.io.Serializable." />
      <property name="ignoreComments" value="true" />
    </module>
    <!-- end Orwellian suppression of Serializable -->
  </module>
</module>
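
For illustration, a minimal Java snippet (hypothetical, not part of this commit) that both RegexpSinglelineJava rules above would flag:

    import java.io.Serializable;  // flagged: the line references java.io.Serializable

    public class Flagged implements Serializable {
        // flagged below: the declaration line contains serialVersionUID
        private static final long serialVersionUID = 1L;
    }

Since both rules set ignoreComments to true, mentions inside comments are not reported; only the import and the field declaration trip the checks.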
File diff suppressed because it is too large

@@ -0,0 +1,35 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057

@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom

java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests

@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()

org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232
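
To make the Randomness rule concrete, a short sketch (an assumption for illustration; it presumes org.elasticsearch.common.Randomness#get returns a java.util.Random, as the rule's message suggests):

    import java.util.Random;
    import org.elasticsearch.common.Randomness;

    public class DiceRoll {
        public static int roll() {
            // new Random() is forbidden above: it is not reproducible under the test seed
            Random random = Randomness.get(); // reproducible source of randomness
            return 1 + random.nextInt(6);     // uniform value in 1..6
        }
    }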

@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed; try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()

@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)

@@ -33,22 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.Filter
org.apache.lucene.search.FilteredQuery
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
org.apache.lucene.search.QueryWrapperFilter
org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

@defaultMessage Specify a location for the temp file/directory instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])

@@ -61,9 +45,6 @@ java.io.ObjectInput

java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead

java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057

@defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int)
java.net.Socket#<init>(java.lang.String,int)

@@ -92,12 +73,16 @@ java.net.InetAddress#getCanonicalHostName()
java.net.InetSocketAddress#getHostName() @ Use getHostString() instead, which avoids a DNS lookup

@defaultMessage Do not violate java's access system
java.lang.Class#getDeclaredClasses() @ Do not violate java's access system: Use getClasses() instead
java.lang.Class#getDeclaredConstructor(java.lang.Class[]) @ Do not violate java's access system: Use getConstructor() instead
java.lang.Class#getDeclaredConstructors() @ Do not violate java's access system: Use getConstructors() instead
java.lang.Class#getDeclaredField(java.lang.String) @ Do not violate java's access system: Use getField() instead
java.lang.Class#getDeclaredFields() @ Do not violate java's access system: Use getFields() instead
java.lang.Class#getDeclaredMethod(java.lang.String, java.lang.Class[]) @ Do not violate java's access system: Use getMethod() instead
java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use getMethods() instead
java.lang.reflect.AccessibleObject#setAccessible(boolean)
java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean)

@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

@defaultMessage this method needs special permission
java.lang.Thread#getAllStackTraces()

@@ -116,3 +101,5 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r
java.util.Collections#EMPTY_LIST
java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET

java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness

@@ -0,0 +1,98 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

# Checks that we run against bytecode of third-party dependencies
#
# Be judicious about what is denied here: MANY classes will be subject
# to these rules, so please try to keep the false positive rate low!
#
# Each third party .class failing checks will need to be explicitly
# listed in the module's build.gradle file:
#
#   thirdPartyAudit.excludes = [
#     // uses internal java api: sun.misc.Unsafe
#     'org.foo.Bar',
#     // missing class!
#     'com.missing.dependency.WTF',
#     // ...
#   ]
#
# Wildcards are not allowed, excludes must be exact. The build also fails with
# the message "Invalid exclusions, nothing is wrong with these classes" if
# extraneous classes are in the excludes list; this ensures the list is
# up-to-date, and that each module accurately documents the evil things
# that its dependencies do.
#
# For more information, look at ThirdPartyAuditTask.groovy in buildSrc/

#
# Ruleset to fail on java internal apis, using this logic:
# http://docs.oracle.com/javase/8/docs/api/java/lang/SecurityManager.html#checkPackageAccess-java.lang.String-
#
# // The list may change at any time, regenerated with:
# for (String pkg : new TreeSet<>(Arrays.asList(
#     Security.getProperty("package.access").split(",")))) {
#   System.out.println(pkg + "**");
# }
#
@defaultMessage non-public internal runtime class
com.oracle.webservices.internal.**
com.oracle.xmlns.internal.**
com.sun.activation.registries.**
com.sun.browser.**
com.sun.corba.se.**
com.sun.glass.**
com.sun.imageio.**
com.sun.istack.internal.**
com.sun.javafx.**
com.sun.jmx.**
com.sun.media.**
com.sun.media.sound.**
com.sun.naming.internal.**
com.sun.openpisces.**
com.sun.org.apache.bcel.internal.**
com.sun.org.apache.regexp.internal.**
com.sun.org.apache.xalan.internal.extensions.**
com.sun.org.apache.xalan.internal.lib.**
com.sun.org.apache.xalan.internal.res.**
com.sun.org.apache.xalan.internal.templates.**
com.sun.org.apache.xalan.internal.utils.**
com.sun.org.apache.xalan.internal.xslt.**
com.sun.org.apache.xalan.internal.xsltc.cmdline.**
com.sun.org.apache.xalan.internal.xsltc.compiler.**
com.sun.org.apache.xalan.internal.xsltc.trax.**
com.sun.org.apache.xalan.internal.xsltc.util.**
com.sun.org.apache.xerces.internal.**
com.sun.org.apache.xml.internal.res.**
com.sun.org.apache.xml.internal.security.**
com.sun.org.apache.xml.internal.serializer.utils.**
com.sun.org.apache.xml.internal.utils.**
com.sun.org.apache.xpath.internal.**
com.sun.org.glassfish.**
com.sun.pisces.**
com.sun.prism.**
com.sun.proxy.**
com.sun.scenario.**
com.sun.t2k.**
com.sun.webkit.**
com.sun.xml.internal.**
jdk.internal.**
jdk.management.resource.internal.**
jdk.nashorn.internal.**
jdk.nashorn.tools.**
oracle.jrockit.jfr.**
org.jcp.xml.dsig.internal.**
sun.**

@@ -1,27 +1,15 @@
# Elasticsearch plugin descriptor file
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch`
# inside all plugins.
#
# A plugin can be 'site', 'jvm', or both.
#
### example site plugin for "foo":
### example plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
#   _site/ <-- the contents that will be served
#   plugin-descriptor.properties <-- example contents below:
#|____elasticsearch/
#|    |____ <arbitrary name1>.jar <-- classes, resources, dependencies
#|    |____ <arbitrary nameN>.jar <-- any number of jars
#|    |____ plugin-descriptor.properties <-- example contents below:
#
# site=true
# description=My cool plugin
# version=1.0
#
### example jvm plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
#   <arbitrary name1>.jar <-- classes, resources, dependencies
#   <arbitrary nameN>.jar <-- any number of jars
#   plugin-descriptor.properties <-- example contents below:
#
# jvm=true
# classname=foo.bar.BazPlugin
# description=My cool plugin
# version=2.0

@@ -38,21 +26,6 @@ version=${version}
#
# 'name': the plugin name
name=${name}

### mandatory elements for site plugins:
#
# 'site': set to true to indicate contents of the _site/
#  directory in the root of the plugin should be served.
site=${site}
#
### mandatory elements for jvm plugins:
#
# 'jvm': true if the 'classname' class should be loaded
#  from jar files in the root directory of the plugin.
#  Note that only jar files in the root directory are
#  added to the classpath for the plugin! If you need
#  other resources, package them into a resources jar.
jvm=${jvm}
#
# 'classname': the name of the class to load, fully-qualified.
classname=${classname}

@@ -65,12 +38,3 @@ java.version=${javaVersion}
#
# 'elasticsearch.version' version of elasticsearch compiled against
elasticsearch.version=${elasticsearchVersion}
#
### deprecated elements for jvm plugins:
#
# 'isolated': true if the plugin should have its own classloader.
# passing false is deprecated, and only intended to support plugins
# that have hard dependencies against each other. If this is
# not specified, then the plugin is isolated by default.
isolated=${isolated}
#

@@ -1,19 +1,20 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.4.0-snapshot-1715952
elasticsearch = 5.0.0-alpha2
lucene = 6.0.0

# optional dependencies
spatial4j = 0.5
spatial4j = 0.6
jts = 1.13
mustache = 0.9.1
jackson = 2.6.2
jackson = 2.7.1
log4j = 1.2.17
slf4j = 1.6.2
jna = 4.1.0

# test dependencies
randomizedrunner = 2.2.0
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
commonslogging = 1.1.3

@@ -219,7 +219,7 @@ h1. License
<pre>
This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.

Copyright 2009-2015 Elasticsearch <https://www.elastic.co>
Copyright 2009-2016 Elasticsearch <https://www.elastic.co>

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of

@@ -17,9 +17,9 @@
 * under the License.
 */

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestSpecHack

apply plugin: 'elasticsearch.build'
apply plugin: 'com.bmuschko.nexus'

@@ -42,13 +42,14 @@ dependencies {
  compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
  compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
  compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

  compile 'org.elasticsearch:securesm:1.0'

  // utilities
  compile 'commons-cli:commons-cli:1.3.1'
  compile 'net.sf.jopt-simple:jopt-simple:4.9'
  compile 'com.carrotsearch:hppc:0.7.1'

  // time handling, remove with java 8 time

@@ -71,21 +72,17 @@ dependencies {
  compile 'org.hdrhistogram:HdrHistogram:2.1.6'

  // lucene spatial
  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "com.vividsolutions:jts:${versions.jts}", optional

  // templating
  compile "com.github.spullara.mustache.java:compiler:${versions.mustache}", optional

  // logging
  compile "log4j:log4j:${versions.log4j}", optional
  compile "log4j:apache-log4j-extras:${versions.log4j}", optional
  compile "org.slf4j:slf4j-api:${versions.slf4j}", optional

  compile "net.java.dev.jna:jna:${versions.jna}", optional

  if (isEclipse == false || project.path == ":core-tests") {
    testCompile("org.elasticsearch:test-framework:${version}") {
    testCompile("org.elasticsearch.test:framework:${version}") {
      // tests use the locally compiled version of core
      exclude group: 'org.elasticsearch', module: 'elasticsearch'
    }

@@ -105,8 +102,8 @@ if (isEclipse) {
  }
}

compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"

forbiddenPatterns {
  exclude '**/*.json'

@@ -114,6 +111,119 @@ forbiddenPatterns {
  exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}

thirdPartyAudit.excludes = [
  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
  'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',

  // classes are missing!

  // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
  'com.fasterxml.jackson.databind.ObjectMapper',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
  'com.google.protobuf.CodedInputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
  'com.google.protobuf.CodedOutputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
  'com.google.protobuf.ExtensionRegistry',
  'com.google.protobuf.MessageLite$Builder',
  'com.google.protobuf.MessageLite',
  'com.google.protobuf.Parser',

  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
  'javax.jms.Message',
  'javax.jms.MessageListener',
  'javax.jms.ObjectMessage',
  'javax.jms.TopicConnection',
  'javax.jms.TopicConnectionFactory',
  'javax.jms.TopicPublisher',
  'javax.jms.TopicSession',
  'javax.jms.TopicSubscriber',

  // from org.apache.log4j.net.SMTPAppender (log4j)
  'javax.mail.Authenticator',
  'javax.mail.Message$RecipientType',
  'javax.mail.Message',
  'javax.mail.Multipart',
  'javax.mail.PasswordAuthentication',
  'javax.mail.Session',
  'javax.mail.Transport',
  'javax.mail.internet.InternetAddress',
  'javax.mail.internet.InternetHeaders',
  'javax.mail.internet.MimeBodyPart',
  'javax.mail.internet.MimeMessage',
  'javax.mail.internet.MimeMultipart',
  'javax.mail.internet.MimeUtility',

  // from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
  'javax.servlet.ServletConfig',
  'javax.servlet.ServletException',
  'javax.servlet.ServletOutputStream',
  'javax.servlet.http.HttpServlet',
  'javax.servlet.http.HttpServletRequest',
  'javax.servlet.http.HttpServletResponse',

  // from org.jboss.netty.logging.CommonsLoggerFactory (netty)
  'org.apache.commons.logging.Log',
  'org.apache.commons.logging.LogFactory',

  // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
  'org.apache.tomcat.jni.Buffer',
  'org.apache.tomcat.jni.Library',
  'org.apache.tomcat.jni.Pool',
  'org.apache.tomcat.jni.SSL',
  'org.apache.tomcat.jni.SSLContext',

  // from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
  'org.bouncycastle.asn1.x500.X500Name',
  'org.bouncycastle.cert.X509v3CertificateBuilder',
  'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
  'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
  'org.bouncycastle.jce.provider.BouncyCastleProvider',
  'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',

  // from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
  'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
  'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
  'org.eclipse.jetty.npn.NextProtoNego',

  // from org.jboss.netty.logging.JBossLoggerFactory (netty)
  'org.jboss.logging.Logger',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
  'org.jboss.marshalling.ByteInput',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
  'org.jboss.marshalling.ByteOutput',

  // from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
  'org.jboss.marshalling.Marshaller',

  // from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
  'org.jboss.marshalling.MarshallerFactory',
  'org.jboss.marshalling.MarshallingConfiguration',
  'org.jboss.marshalling.Unmarshaller',

  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',

  // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
  'org.osgi.framework.BundleActivator',
  'org.osgi.framework.BundleContext',

  // from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
  'org.osgi.framework.ServiceReference',
  'org.osgi.service.log.LogService',
  'org.osgi.util.tracker.ServiceTracker',
  'org.osgi.util.tracker.ServiceTrackerCustomizer',

  // from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
  'org.slf4j.Logger',
  'org.slf4j.LoggerFactory',
]

// dependency licenses are currently checked in distribution
dependencyLicenses.enabled = false

@@ -0,0 +1,117 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.SuppressForbidden;

/**
 * Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and
 * LUCENE-7234 are released.
 */
// TODO: remove me when we upgrade to Lucene 6.1
@SuppressForbidden(reason="uses InetAddress.getHostAddress")
public final class XInetAddressPoint {

    private XInetAddressPoint() {}

    /** The minimum value that an ip address can hold. */
    public static final InetAddress MIN_VALUE;
    /** The maximum value that an ip address can hold. */
    public static final InetAddress MAX_VALUE;
    static {
        MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]);
        byte[] maxValueBytes = new byte[InetAddressPoint.BYTES];
        Arrays.fill(maxValueBytes, (byte) 0xFF);
        MAX_VALUE = InetAddressPoint.decode(maxValueBytes);
    }

    /**
     * Return the {@link InetAddress} that compares immediately greater than
     * {@code address}.
     * @throws ArithmeticException if the provided address is the
     *         {@link #MAX_VALUE maximum ip address}
     */
    public static InetAddress nextUp(InetAddress address) {
        if (address.equals(MAX_VALUE)) {
            throw new ArithmeticException("Overflow: there is no greater InetAddress than "
                    + address.getHostAddress());
        }
        byte[] delta = new byte[InetAddressPoint.BYTES];
        delta[InetAddressPoint.BYTES-1] = 1;
        byte[] nextUpBytes = new byte[InetAddressPoint.BYTES];
        NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes);
        return InetAddressPoint.decode(nextUpBytes);
    }

    /**
     * Return the {@link InetAddress} that compares immediately less than
     * {@code address}.
     * @throws ArithmeticException if the provided address is the
     *         {@link #MIN_VALUE minimum ip address}
     */
    public static InetAddress nextDown(InetAddress address) {
        if (address.equals(MIN_VALUE)) {
            throw new ArithmeticException("Underflow: there is no smaller InetAddress than "
                    + address.getHostAddress());
        }
        byte[] delta = new byte[InetAddressPoint.BYTES];
        delta[InetAddressPoint.BYTES-1] = 1;
        byte[] nextDownBytes = new byte[InetAddressPoint.BYTES];
        NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes);
        return InetAddressPoint.decode(nextDownBytes);
    }

    /**
     * Create a prefix query for matching a CIDR network range.
     *
     * @param field field name. must not be {@code null}.
     * @param value any host address
     * @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4
     *        addresses.
     * @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid.
     * @return a query matching documents with addresses contained within this network
     */
    // TODO: remove me when we upgrade to Lucene 6.0.1
    public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) {
        if (value == null) {
            throw new IllegalArgumentException("InetAddress must not be null");
        }
        if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) {
            throw new IllegalArgumentException("illegal prefixLength '" + prefixLength
                    + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges");
        }
        // create the lower value by zeroing out the host portion, upper value by filling it with all ones.
        byte lower[] = value.getAddress();
        byte upper[] = value.getAddress();
        for (int i = prefixLength; i < 8 * lower.length; i++) {
            int m = 1 << (7 - (i & 7));
            lower[i >> 3] &= ~m;
            upper[i >> 3] |= m;
        }
        try {
            return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper));
        } catch (UnknownHostException e) {
            throw new AssertionError(e); // values are coming from InetAddress
        }
    }
}
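
A brief usage sketch for the forked newPrefixQuery above (the field name "ip" and the address are hypothetical, chosen only for illustration):

    import java.net.InetAddress;
    import org.apache.lucene.document.XInetAddressPoint;
    import org.apache.lucene.search.Query;

    public class CidrQueryExample {
        public static Query ipRangeQuery() throws Exception {
            // matches documents whose "ip" field falls inside 192.168.0.0/24
            InetAddress base = InetAddress.getByName("192.168.0.0");
            return XInetAddressPoint.newPrefixQuery("ip", base, 24);
        }
    }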

@@ -0,0 +1,130 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.lucene.index;

import org.apache.lucene.util.StringHelper;

import java.io.IOException;

/**
 * Forked utility methods from Lucene's PointValues until LUCENE-7257 is released.
 */
public class XPointValues {
    /** Return the cumulated number of points across all leaves of the given
     * {@link IndexReader}. Leaves that do not have points for the given field
     * are ignored.
     * @see PointValues#size(String) */
    public static long size(IndexReader reader, String field) throws IOException {
        long size = 0;
        for (LeafReaderContext ctx : reader.leaves()) {
            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
            if (info == null || info.getPointDimensionCount() == 0) {
                continue;
            }
            PointValues values = ctx.reader().getPointValues();
            size += values.size(field);
        }
        return size;
    }

    /** Return the cumulated number of docs that have points across all leaves
     * of the given {@link IndexReader}. Leaves that do not have points for the
     * given field are ignored.
     * @see PointValues#getDocCount(String) */
    public static int getDocCount(IndexReader reader, String field) throws IOException {
        int count = 0;
        for (LeafReaderContext ctx : reader.leaves()) {
            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
            if (info == null || info.getPointDimensionCount() == 0) {
                continue;
            }
            PointValues values = ctx.reader().getPointValues();
            count += values.getDocCount(field);
        }
        return count;
    }

    /** Return the minimum packed values across all leaves of the given
     * {@link IndexReader}. Leaves that do not have points for the given field
     * are ignored.
     * @see PointValues#getMinPackedValue(String) */
    public static byte[] getMinPackedValue(IndexReader reader, String field) throws IOException {
        byte[] minValue = null;
        for (LeafReaderContext ctx : reader.leaves()) {
            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
            if (info == null || info.getPointDimensionCount() == 0) {
                continue;
            }
            PointValues values = ctx.reader().getPointValues();
            byte[] leafMinValue = values.getMinPackedValue(field);
            if (leafMinValue == null) {
                continue;
            }
            if (minValue == null) {
                minValue = leafMinValue.clone();
            } else {
                final int numDimensions = values.getNumDimensions(field);
                final int numBytesPerDimension = values.getBytesPerDimension(field);
                for (int i = 0; i < numDimensions; ++i) {
                    int offset = i * numBytesPerDimension;
                    if (StringHelper.compare(numBytesPerDimension, leafMinValue, offset, minValue, offset) < 0) {
                        System.arraycopy(leafMinValue, offset, minValue, offset, numBytesPerDimension);
                    }
                }
            }
        }
        return minValue;
    }

    /** Return the maximum packed values across all leaves of the given
     * {@link IndexReader}. Leaves that do not have points for the given field
     * are ignored.
     * @see PointValues#getMaxPackedValue(String) */
    public static byte[] getMaxPackedValue(IndexReader reader, String field) throws IOException {
        byte[] maxValue = null;
        for (LeafReaderContext ctx : reader.leaves()) {
            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
            if (info == null || info.getPointDimensionCount() == 0) {
                continue;
            }
            PointValues values = ctx.reader().getPointValues();
            byte[] leafMaxValue = values.getMaxPackedValue(field);
            if (leafMaxValue == null) {
                continue;
            }
            if (maxValue == null) {
                maxValue = leafMaxValue.clone();
            } else {
                final int numDimensions = values.getNumDimensions(field);
                final int numBytesPerDimension = values.getBytesPerDimension(field);
                for (int i = 0; i < numDimensions; ++i) {
                    int offset = i * numBytesPerDimension;
                    if (StringHelper.compare(numBytesPerDimension, leafMaxValue, offset, maxValue, offset) > 0) {
                        System.arraycopy(leafMaxValue, offset, maxValue, offset, numBytesPerDimension);
                    }
                }
            }
        }
        return maxValue;
    }

    /** Default constructor */
    private XPointValues() {
    }
}
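
A hedged usage sketch for the XPointValues helpers above (the reader and field name are assumptions; any IndexReader over a points-indexed field would do):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.XPointValues;

    public class PointStats {
        // Summarizes point statistics for a field across all leaves of a reader.
        public static String summarize(IndexReader reader, String field) throws IOException {
            long points = XPointValues.size(reader, field);     // total indexed points
            int docs = XPointValues.getDocCount(reader, field); // docs that carry points
            return field + ": " + points + " points across " + docs + " docs";
        }
    }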

@@ -18,14 +18,24 @@
 */
package org.apache.lucene.queries;

import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

@@ -44,7 +54,7 @@
 * While aggregating the total term frequency is trivial since it
 * can be summed up not every {@link org.apache.lucene.search.similarities.Similarity}
 * makes use of this statistic. The document frequency which is used in the
 * {@link org.apache.lucene.search.similarities.DefaultSimilarity}
 * {@link org.apache.lucene.search.similarities.ClassicSimilarity}
 * can only be estimated as a lower bound since it is a document based statistic. For
 * the document frequency the maximum frequency across all fields per term is used
 * which is the minimum number of documents the term occurs in.

@@ -227,6 +237,10 @@
        return newCtx;
    }

    public List<Term> getTerms() {
        return Arrays.asList(terms);
    }

    @Override
    public String toString(String field) {
        StringBuilder builder = new StringBuilder("blended(terms:[");

@@ -236,14 +250,15 @@
            if (boosts != null) {
                boost = boosts[i];
            }
            builder.append(ToStringUtils.boost(boost));
            if (boost != 1f) {
                builder.append('^').append(boost);
            }
            builder.append(", ");
        }
        if (terms.length > 0) {
            builder.setLength(builder.length() - 2);
        }
        builder.append("])");
        builder.append(ToStringUtils.boost(getBoost()));
        return builder.toString();
    }

@@ -316,7 +331,7 @@
        }
        if ((maxTermFrequency >= 1f && docFreqs[i] > maxTermFrequency)
                || (docFreqs[i] > (int) Math.ceil(maxTermFrequency
                        * (float) maxDoc))) {
                        * maxDoc))) {
            highBuilder.add(query, BooleanClause.Occur.SHOULD);
        } else {
            lowBuilder.add(query, BooleanClause.Occur.SHOULD);

@@ -352,15 +367,15 @@
        return new BlendedTermQuery(terms, boosts) {
            @Override
            protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
                DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreakerMultiplier);
                List<Query> queries = new ArrayList<>(ctx.length);
                for (int i = 0; i < terms.length; i++) {
                    Query query = new TermQuery(terms[i], ctx[i]);
                    if (boosts != null && boosts[i] != 1f) {
                        query = new BoostQuery(query, boosts[i]);
                    }
                    disMaxQuery.add(query);
                    queries.add(query);
                }
                return disMaxQuery;
                return new DisjunctionMaxQuery(queries, tieBreakerMultiplier);
            }
        };
    }
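
The hunk above reflects Lucene 6 making DisjunctionMaxQuery immutable: instead of adding disjuncts one by one via add(), all clauses are passed to the constructor up front. A minimal sketch of the new style (field names and terms are hypothetical):

    import java.util.Arrays;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.DisjunctionMaxQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class DisMaxExample {
        public static Query titleOrBody() {
            Query title = new TermQuery(new Term("title", "shirt"));
            Query body = new TermQuery(new Term("body", "shirt"));
            // all disjuncts passed up front, with a 0.1 tie-breaker; no add() calls afterwards
            return new DisjunctionMaxQuery(Arrays.asList(title, body), 0.1f);
        }
    }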
@@ -22,8 +22,18 @@ package org.apache.lucene.queryparser.classic;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BoostQuery;
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.automaton.RegExp;
 import org.elasticsearch.common.lucene.search.Queries;
@@ -31,11 +41,17 @@ import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LegacyDateFieldMapper;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.support.QueryParsers;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
 
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
@@ -54,7 +70,6 @@ public class MapperQueryParser extends QueryParser {
     static {
         Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
         fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
-        fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
         FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
     }
 
@@ -93,7 +108,8 @@ public class MapperQueryParser extends QueryParser {
     }
 
     /**
-     * We override this one so we can get the fuzzy part to be treated as string, so people can do: "age:10~5" or "timestamp:2012-10-10~5d"
+     * We override this one so we can get the fuzzy part to be treated as string,
+     * so people can do: "age:10~5" or "timestamp:2012-10-10~5d"
     */
    @Override
    Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
@@ -131,19 +147,19 @@ public class MapperQueryParser extends QueryParser {
             return getFieldQuerySingle(fields.iterator().next(), queryText, quoted);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getFieldQuerySingle(mField, queryText, quoted);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -152,9 +168,8 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-            if (clauses.size() == 0) // happens for stopwords
-                return null;
-            return getBooleanQuery(clauses, true);
+            if (clauses.isEmpty()) return null; // happens for stopwords
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getFieldQuerySingle(field, queryText, quoted);
@@ -203,7 +218,8 @@ public class MapperQueryParser extends QueryParser {
         }
         if (currentFieldType != null) {
             Query query = null;
-            if (currentFieldType.useTermQueryWithQueryString()) {
+            if (currentFieldType.tokenized() == false) {
+                // this might be a structured field like a numeric
                 try {
                     query = currentFieldType.termQuery(queryText, context);
                 } catch (RuntimeException e) {
@@ -215,7 +231,7 @@ public class MapperQueryParser extends QueryParser {
             }
         }
         if (query == null) {
-            query = super.getFieldQuery(currentFieldType.names().indexName(), queryText, quoted);
+            query = super.getFieldQuery(currentFieldType.name(), queryText, quoted);
         }
         return query;
     }
@@ -231,20 +247,20 @@ public class MapperQueryParser extends QueryParser {
         Collection<String> fields = extractMultiFields(field);
         if (fields != null) {
             if (settings.useDisMax()) {
-                DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+                List<Query> queries = new ArrayList<>();
                 boolean added = false;
                 for (String mField : fields) {
                     Query q = super.getFieldQuery(mField, queryText, slop);
                     if (q != null) {
                         added = true;
                         q = applySlop(q, slop);
-                        disMaxQuery.add(applyBoost(mField, q));
+                        queries.add(applyBoost(mField, q));
                     }
                 }
                 if (!added) {
                     return null;
                 }
-                return disMaxQuery;
+                return new DisjunctionMaxQuery(queries, settings.tieBreaker());
             } else {
                 List<BooleanClause> clauses = new ArrayList<>();
                 for (String mField : fields) {
@@ -254,9 +270,8 @@ public class MapperQueryParser extends QueryParser {
                     clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
                 }
             }
-            if (clauses.size() == 0) // happens for stopwords
-                return null;
-            return getBooleanQuery(clauses, true);
+            if (clauses.isEmpty()) return null; // happens for stopwords
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return super.getFieldQuery(field, queryText, slop);
@@ -264,7 +279,8 @@ public class MapperQueryParser extends QueryParser {
     }
 
     @Override
-    protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException {
+    protected Query getRangeQuery(String field, String part1, String part2,
+                                  boolean startInclusive, boolean endInclusive) throws ParseException {
         if ("*".equals(part1)) {
             part1 = null;
         }
@@ -284,19 +300,19 @@ public class MapperQueryParser extends QueryParser {
         }
 
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -305,23 +321,26 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-        if (clauses.size() == 0) // happens for stopwords
-            return null;
-        return getBooleanQuery(clauses, true);
+        if (clauses.isEmpty()) return null; // happens for stopwords
+        return getBooleanQueryCoordDisabled(clauses);
         }
     }
 
-    private Query getRangeQuerySingle(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) {
+    private Query getRangeQuerySingle(String field, String part1, String part2,
+                                      boolean startInclusive, boolean endInclusive) {
         currentFieldType = context.fieldMapper(field);
         if (currentFieldType != null) {
-            if (lowercaseExpandedTerms && !currentFieldType.isNumeric()) {
+            if (lowercaseExpandedTerms && currentFieldType.tokenized()) {
                 part1 = part1 == null ? null : part1.toLowerCase(locale);
                 part2 = part2 == null ? null : part2.toLowerCase(locale);
             }
 
             try {
                 Query rangeQuery;
-                if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) {
+                if (currentFieldType instanceof LegacyDateFieldMapper.DateFieldType && settings.timeZone() != null) {
+                    LegacyDateFieldMapper.DateFieldType dateFieldType = (LegacyDateFieldMapper.DateFieldType) this.currentFieldType;
+                    rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null);
+                } else if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) {
                     DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType;
                     rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null);
                 } else {
@@ -348,19 +367,19 @@ public class MapperQueryParser extends QueryParser {
             return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -369,7 +388,7 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-        return getBooleanQuery(clauses, true);
+        return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -380,7 +399,8 @@ public class MapperQueryParser extends QueryParser {
         currentFieldType = context.fieldMapper(field);
         if (currentFieldType != null) {
             try {
-                return currentFieldType.fuzzyQuery(termStr, Fuzziness.build(minSimilarity), fuzzyPrefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
+                return currentFieldType.fuzzyQuery(termStr, Fuzziness.build(minSimilarity),
+                    fuzzyPrefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
             } catch (RuntimeException e) {
                 if (settings.lenient()) {
                     return null;
@@ -395,7 +415,8 @@ public class MapperQueryParser extends QueryParser {
     protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
         String text = term.text();
         int numEdits = FuzzyQuery.floatToEdits(minimumSimilarity, text.codePointCount(0, text.length()));
-        FuzzyQuery query = new FuzzyQuery(term, numEdits, prefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
+        FuzzyQuery query = new FuzzyQuery(term, numEdits, prefixLength,
+            settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
         QueryParsers.setRewriteMethod(query, settings.fuzzyRewriteMethod());
         return query;
     }
@@ -411,19 +432,19 @@ public class MapperQueryParser extends QueryParser {
             return getPrefixQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getPrefixQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -432,9 +453,8 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-            if (clauses.size() == 0) // happens for stopwords
-                return null;
-            return getBooleanQuery(clauses, true);
+            if (clauses.isEmpty()) return null; // happens for stopwords
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getPrefixQuerySingle(field, termStr);
@@ -451,11 +471,11 @@ public class MapperQueryParser extends QueryParser {
             setAnalyzer(context.getSearchAnalyzer(currentFieldType));
         }
         Query query = null;
-        if (currentFieldType.useTermQueryWithQueryString()) {
+        if (currentFieldType.tokenized() == false) {
             query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context);
         }
         if (query == null) {
-            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.names().indexName(), termStr);
+            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr);
         }
         return query;
     }
@@ -474,7 +494,7 @@ public class MapperQueryParser extends QueryParser {
         if (!settings.analyzeWildcard()) {
             return super.getPrefixQuery(field, termStr);
         }
-        List<String> tlist;
+        List<List<String> > tlist;
         // get Analyzer from superclass and tokenize the term
         TokenStream source = null;
         try {
@@ -485,7 +505,9 @@ public class MapperQueryParser extends QueryParser {
                 return super.getPrefixQuery(field, termStr);
             }
             tlist = new ArrayList<>();
+            List<String> currentPos = new ArrayList<>();
             CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+            PositionIncrementAttribute posAtt = source.addAttribute(PositionIncrementAttribute.class);
 
             while (true) {
                 try {
@@ -493,7 +515,14 @@ public class MapperQueryParser extends QueryParser {
                 } catch (IOException e) {
                     break;
                 }
-                tlist.add(termAtt.toString());
+                if (currentPos.isEmpty() == false && posAtt.getPositionIncrement() > 0) {
+                    tlist.add(currentPos);
+                    currentPos = new ArrayList<>();
+                }
+                currentPos.add(termAtt.toString());
             }
+            if (currentPos.isEmpty() == false) {
+                tlist.add(currentPos);
+            }
         } finally {
             if (source != null) {
@@ -501,16 +530,45 @@ public class MapperQueryParser extends QueryParser {
             }
         }
 
-        if (tlist.size() == 1) {
-            return super.getPrefixQuery(field, tlist.get(0));
-        } else {
-            // build a boolean query with prefix on each one...
-            List<BooleanClause> clauses = new ArrayList<>();
-            for (String token : tlist) {
-                clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
-            }
-            return getBooleanQuery(clauses, true);
+        if (tlist.size() == 0) {
+            return null;
+        }
+
+        if (tlist.size() == 1 && tlist.get(0).size() == 1) {
+            return super.getPrefixQuery(field, tlist.get(0).get(0));
+        }
+
+        // build a boolean query with prefix on the last position only.
+        List<BooleanClause> clauses = new ArrayList<>();
+        for (int pos = 0; pos < tlist.size(); pos++) {
+            List<String> plist = tlist.get(pos);
+            boolean isLastPos = (pos == tlist.size() - 1);
+            Query posQuery;
+            if (plist.size() == 1) {
+                if (isLastPos) {
+                    posQuery = super.getPrefixQuery(field, plist.get(0));
+                } else {
+                    posQuery = newTermQuery(new Term(field, plist.get(0)));
+                }
+            } else if (isLastPos == false) {
+                // build a synonym query for terms in the same position.
+                Term[] terms = new Term[plist.size()];
+                for (int i = 0; i < plist.size(); i++) {
+                    terms[i] = new Term(field, plist.get(i));
+                }
+                posQuery = new SynonymQuery(terms);
+            } else {
+                List<BooleanClause> innerClauses = new ArrayList<>();
+                for (String token : plist) {
+                    innerClauses.add(new BooleanClause(super.getPrefixQuery(field, token),
+                        BooleanClause.Occur.SHOULD));
+                }
+                posQuery = getBooleanQueryCoordDisabled(innerClauses);
+            }
+            clauses.add(new BooleanClause(posQuery,
+                getDefaultOperator() == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD));
+        }
+        return getBooleanQuery(clauses);
     }
 
     @Override
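A condensed sketch of the position-aware prefix handling added above (illustrative, and simplified: the real code ORs prefix queries over all last-position terms, while this sketch keeps only the first one):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;

import java.util.List;

class AnalyzedPrefixSketch {
    // For analyzed input like [[wi, wifi], [fi]] ("wi-fi" plus a synonym),
    // earlier positions become exact terms (or a SynonymQuery when a position
    // holds several terms) and only the last position keeps prefix semantics.
    static Query build(String field, List<List<String>> positions) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (int pos = 0; pos < positions.size(); pos++) {
            List<String> plist = positions.get(pos);
            boolean lastPos = pos == positions.size() - 1;
            Query posQuery;
            if (lastPos) {
                posQuery = new PrefixQuery(new Term(field, plist.get(0)));
            } else if (plist.size() == 1) {
                posQuery = new TermQuery(new Term(field, plist.get(0)));
            } else {
                Term[] terms = new Term[plist.size()];
                for (int i = 0; i < plist.size(); i++) {
                    terms[i] = new Term(field, plist.get(i));
                }
                posQuery = new SynonymQuery(terms);
            }
            builder.add(posQuery, BooleanClause.Occur.MUST);
        }
        return builder.build();
    }
}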
@@ -541,19 +599,19 @@ public class MapperQueryParser extends QueryParser {
             return getWildcardQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getWildcardQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -562,9 +620,8 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-            if (clauses.size() == 0) // happens for stopwords
-                return null;
-            return getBooleanQuery(clauses, true);
+            if (clauses.isEmpty()) return null; // happens for stopwords
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getWildcardQuerySingle(field, termStr);
@@ -581,7 +638,7 @@ public class MapperQueryParser extends QueryParser {
         if (!settings.forceAnalyzer()) {
             setAnalyzer(context.getSearchAnalyzer(currentFieldType));
         }
-        indexedNameField = currentFieldType.names().indexName();
+        indexedNameField = currentFieldType.name();
         return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
     }
     return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
@@ -670,19 +727,19 @@ public class MapperQueryParser extends QueryParser {
             return getRegexpQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getRegexpQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {
@@ -691,9 +748,8 @@ public class MapperQueryParser extends QueryParser {
                 clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
             }
         }
-        if (clauses.size() == 0) // happens for stopwords
-            return null;
-        return getBooleanQuery(clauses, true);
+        if (clauses.isEmpty()) return null; // happens for stopwords
+        return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getRegexpQuerySingle(field, termStr);
@@ -710,8 +766,9 @@ public class MapperQueryParser extends QueryParser {
             setAnalyzer(context.getSearchAnalyzer(currentFieldType));
         }
         Query query = null;
-        if (currentFieldType.useTermQueryWithQueryString()) {
-            query = currentFieldType.regexpQuery(termStr, RegExp.ALL, maxDeterminizedStates, multiTermRewriteMethod, context);
+        if (currentFieldType.tokenized() == false) {
+            query = currentFieldType.regexpQuery(termStr, RegExp.ALL,
+                maxDeterminizedStates, multiTermRewriteMethod, context);
         }
         if (query == null) {
             query = super.getRegexpQuery(field, termStr);
@@ -729,9 +786,23 @@ public class MapperQueryParser extends QueryParser {
         }
     }
 
+    /**
+     * @deprecated review all use of this, don't rely on coord
+     */
+    @Deprecated
+    protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
+        BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        builder.setDisableCoord(true);
+        for (BooleanClause clause : clauses) {
+            builder.add(clause);
+        }
+        return fixNegativeQueryIfNeeded(builder.build());
+    }
+
     @Override
-    protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
-        Query q = super.getBooleanQuery(clauses, disableCoord);
+    protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
+        Query q = super.getBooleanQuery(clauses);
         if (q == null) {
             return null;
         }
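Lucene 6 still exposes setDisableCoord on BooleanQuery.Builder, though it is on the way out (hence the @Deprecated tag on the helper above). A minimal sketch of the coord-disabled construction the helper performs (field names invented):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class CoordDisabledSketch {
    static Query twoFieldShould() {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setDisableCoord(true); // keep per-clause scores un-normalized, as before
        builder.add(new TermQuery(new Term("title", "hello")), BooleanClause.Occur.SHOULD);
        builder.add(new TermQuery(new Term("body", "hello")), BooleanClause.Occur.SHOULD);
        return builder.build();
    }
}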
@@ -758,12 +829,12 @@ public class MapperQueryParser extends QueryParser {
         }
         pq = builder.build();
         //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
-        assert q.getBoost() == 1f;
+        assert q instanceof BoostQuery == false;
         return pq;
     } else if (q instanceof MultiPhraseQuery) {
-        ((MultiPhraseQuery) q).setSlop(slop);
-        return q;
+        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
+        builder.setSlop(slop);
+        return builder.build();
     } else {
         return q;
     }
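MultiPhraseQuery is immutable in Lucene 6, so slop can no longer be set on an existing instance; the query has to be copied through its builder, as above. A small sketch of that pattern:

import org.apache.lucene.search.MultiPhraseQuery;

class SlopSketch {
    static MultiPhraseQuery withSlop(MultiPhraseQuery original, int slop) {
        // copy the existing clauses, then set the slop on the copy
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder(original);
        builder.setSlop(slop);
        return builder.build();
    }
}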
@@ -1,42 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.queryparser.classic;
-
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
-import org.elasticsearch.index.query.MissingQueryBuilder;
-import org.elasticsearch.index.query.QueryShardContext;
-
-/**
- *
- */
-public class MissingFieldQueryExtension implements FieldQueryExtension {
-
-    public static final String NAME = "_missing_";
-
-    @Override
-    public Query query(QueryShardContext context, String queryText) {
-        Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE);
-        if (query != null) {
-            return new ConstantScoreQuery(query);
-        }
-        return null;
-    }
-}
@@ -26,8 +26,7 @@ import java.io.IOException;
 /**
  * Abstract decorator class of a DocIdSetIterator
  * implementation that provides on-demand filter/validation
- * mechanism on an underlying DocIdSetIterator. See {@link
- * FilteredDocIdSet}.
+ * mechanism on an underlying DocIdSetIterator.
  */
 public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
     protected DocIdSetIterator _innerIter;

File diff suppressed because it is too large

@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.search.suggest.analyzing;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStreamToAutomaton;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.FiniteStringsIterator;
+import org.apache.lucene.util.automaton.LevenshteinAutomata;
+import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.UTF32ToUTF8;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
+
+/**
+ * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is
+ * based on the Damerau-Levenshtein (optimal string alignment) algorithm, though
+ * you can explicitly choose classic Levenshtein by passing <code>false</code>
+ * for the <code>transpositions</code> parameter.
+ * <p>
+ * At most, this query will match terms up to
+ * {@value org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}
+ * edits. Higher distances are not supported. Note that the
+ * fuzzy distance is measured in "byte space" on the bytes
+ * returned by the {@link org.apache.lucene.analysis.TokenStream}'s {@link
+ * org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute}, usually UTF8. By default
+ * the analyzed bytes must be at least 3 {@link
+ * #DEFAULT_MIN_FUZZY_LENGTH} bytes before any edits are
+ * considered. Furthermore, the first 1 {@link
+ * #DEFAULT_NON_FUZZY_PREFIX} byte is not allowed to be
+ * edited. We allow up to 1 {@link
+ * #DEFAULT_MAX_EDITS} edit.
+ * If the {@link #unicodeAware} parameter in the constructor is set to true, maxEdits,
+ * minFuzzyLength, transpositions and nonFuzzyPrefix are measured in Unicode code
+ * points (actual letters) instead of bytes.
+ *
+ * <p>
+ * NOTE: This suggester does not boost suggestions that
+ * required no edits over suggestions that did require
+ * edits. This is a known limitation.
+ *
+ * <p>
+ * Note: complex query analyzers can have a significant impact on the lookup
+ * performance. It's recommended to not use analyzers that drop or inject terms
+ * like synonyms to keep the complexity of the prefix intersection low for good
+ * lookup performance. At index time, complex analyzers can safely be used.
+ * </p>
+ */
+public final class XFuzzySuggester extends XAnalyzingSuggester {
+    private final int maxEdits;
+    private final boolean transpositions;
+    private final int nonFuzzyPrefix;
+    private final int minFuzzyLength;
+    private final boolean unicodeAware;
+
+    /**
+     * Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix
+     * parameters in Unicode code points (actual letters)
+     * instead of bytes.
+     */
+    public static final boolean DEFAULT_UNICODE_AWARE = false;
+
+    /**
+     * The default minimum length of the key passed to {@link
+     * #lookup} before any edits are allowed.
+     */
+    public static final int DEFAULT_MIN_FUZZY_LENGTH = 3;
+
+    /**
+     * The default prefix length where edits are not allowed.
+     */
+    public static final int DEFAULT_NON_FUZZY_PREFIX = 1;
+
+    /**
+     * The default maximum number of edits for fuzzy
+     * suggestions.
+     */
+    public static final int DEFAULT_MAX_EDITS = 1;
+
+    /**
+     * The default transposition value passed to {@link org.apache.lucene.util.automaton.LevenshteinAutomata}
+     */
+    public static final boolean DEFAULT_TRANSPOSITIONS = true;
+
+    /**
+     * Creates a {@link FuzzySuggester} instance initialized with default values.
+     *
+     * @param analyzer the analyzer used for this suggester
+     */
+    public XFuzzySuggester(Analyzer analyzer) {
+        this(analyzer, analyzer);
+    }
+
+    /**
+     * Creates a {@link FuzzySuggester} instance with an index & a query analyzer initialized with default values.
+     *
+     * @param indexAnalyzer
+     *           Analyzer that will be used for analyzing suggestions while building the index.
+     * @param queryAnalyzer
+     *           Analyzer that will be used for analyzing query text during lookup
+     */
+    public XFuzzySuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
+        this(indexAnalyzer, null, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1,
+            DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS,
+            DEFAULT_NON_FUZZY_PREFIX, DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE,
+            null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+
+    }
+
+    /**
+     * Creates a {@link FuzzySuggester} instance.
+     *
+     * @param indexAnalyzer Analyzer that will be used for
+     *        analyzing suggestions while building the index.
+     * @param queryAnalyzer Analyzer that will be used for
+     *        analyzing query text during lookup
+     * @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP}
+     * @param maxSurfaceFormsPerAnalyzedForm Maximum number of
+     *        surface forms to keep for a single analyzed form.
+     *        When there are too many surface forms we discard the
+     *        lowest weighted ones.
+     * @param maxGraphExpansions Maximum number of graph paths
+     *        to expand from the analyzed form. Set this to -1 for
+     *        no limit.
+     * @param maxEdits must be >= 0 and <= {@link org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} .
+     * @param transpositions <code>true</code> if transpositions should be treated as a primitive
+     *        edit operation. If this is false, comparisons will implement the classic
+     *        Levenshtein algorithm.
+     * @param nonFuzzyPrefix length of common (non-fuzzy) prefix (see default {@link #DEFAULT_NON_FUZZY_PREFIX})
+     * @param minFuzzyLength minimum length of lookup key before any edits are allowed (see default {@link #DEFAULT_MIN_FUZZY_LENGTH})
+     * @param sepLabel separation label
+     * @param payloadSep payload separator byte
+     * @param endByte end byte marker byte
+     */
+    public XFuzzySuggester(Analyzer indexAnalyzer, Automaton queryPrefix, Analyzer queryAnalyzer,
+                           int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+                           int maxEdits, boolean transpositions, int nonFuzzyPrefix, int minFuzzyLength,
+                           boolean unicodeAware, FST<PairOutputs.Pair<Long, BytesRef>> fst, boolean hasPayloads,
+                           int maxAnalyzedPathsForOneInput, int sepLabel, int payloadSep, int endByte, int holeCharacter) {
+        super(indexAnalyzer, queryPrefix, queryAnalyzer, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions,
+            true, fst, hasPayloads, maxAnalyzedPathsForOneInput, sepLabel, payloadSep, endByte, holeCharacter);
+        if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
+            throw new IllegalArgumentException(
+                "maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
+        }
+        if (nonFuzzyPrefix < 0) {
+            throw new IllegalArgumentException("nonFuzzyPrefix must not be < 0 (got " + nonFuzzyPrefix + ")");
+        }
+        if (minFuzzyLength < 0) {
+            throw new IllegalArgumentException("minFuzzyLength must not be < 0 (got " + minFuzzyLength + ")");
+        }
+
+        this.maxEdits = maxEdits;
+        this.transpositions = transpositions;
+        this.nonFuzzyPrefix = nonFuzzyPrefix;
+        this.minFuzzyLength = minFuzzyLength;
+        this.unicodeAware = unicodeAware;
+    }
+
+    @Override
+    protected List<FSTUtil.Path<PairOutputs.Pair<Long,BytesRef>>> getFullPrefixPaths(
+            List<FSTUtil.Path<PairOutputs.Pair<Long,BytesRef>>> prefixPaths, Automaton lookupAutomaton,
+            FST<PairOutputs.Pair<Long,BytesRef>> fst)
+            throws IOException {
+
+        // TODO: right now there's no penalty for fuzzy/edits,
+        // ie a completion whose prefix matched exactly what the
+        // user typed gets no boost over completions that
+        // required an edit, which get no boost over completions
+        // requiring two edits. I suspect a multiplicative
+        // factor is appropriate (eg, say a fuzzy match must be at
+        // least 2X better weight than the non-fuzzy match to
+        // "compete") ... in which case I think the wFST needs
+        // to be log weights or something ...
+
+        Automaton levA = convertAutomaton(toLevenshteinAutomata(lookupAutomaton));
+        /*
+        Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"), "UTF-8");
+        w.write(levA.toDot());
+        w.close();
+        System.out.println("Wrote LevA to out.dot");
+        */
+        return FSTUtil.intersectPrefixPaths(levA, fst);
+    }
+
+    @Override
+    protected Automaton convertAutomaton(Automaton a) {
+        if (unicodeAware) {
+            // FLORIAN EDIT: get converted Automaton from superclass
+            Automaton utf8automaton = new UTF32ToUTF8().convert(super.convertAutomaton(a));
+            // This automaton should not blow up during determinize:
+            utf8automaton = Operations.determinize(utf8automaton, Integer.MAX_VALUE);
+            return utf8automaton;
+        } else {
+            return super.convertAutomaton(a);
+        }
+    }
+
+    @Override
+    public TokenStreamToAutomaton getTokenStreamToAutomaton() {
+        final TokenStreamToAutomaton tsta = super.getTokenStreamToAutomaton();
+        tsta.setUnicodeArcs(unicodeAware);
+        return tsta;
+    }
+
+    Automaton toLevenshteinAutomata(Automaton automaton) {
+        List<Automaton> subs = new ArrayList<>();
+        FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton);
+        for (IntsRef string; (string = finiteStrings.next()) != null;) {
+            if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) {
+                subs.add(Automata.makeString(string.ints, string.offset, string.length));
+            } else {
+                int ints[] = new int[string.length - nonFuzzyPrefix];
+                System.arraycopy(string.ints, string.offset + nonFuzzyPrefix, ints, 0, ints.length);
+                // TODO: maybe add alphaMin to LevenshteinAutomata,
+                // and pass 1 instead of 0? We probably don't want
+                // to allow the trailing dedup bytes to be
+                // edited... but then 0 byte is "in general" allowed
+                // on input (but not in UTF8).
+                LevenshteinAutomata lev = new LevenshteinAutomata(
+                    ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions);
+                subs.add(lev.toAutomaton(maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix)));
+            }
+        }
+
+        if (subs.isEmpty()) {
+            // automaton is empty, there are no accepted paths through it
+            return Automata.makeEmpty(); // matches nothing
+        } else if (subs.size() == 1) {
+            // no synonyms or anything: just a single path through the tokenstream
+            return subs.get(0);
+        } else {
+            // multiple paths: this is really scary! is it slow?
+            // maybe we should not do this and throw UOE?
+            Automaton a = Operations.union(subs);
+            // TODO: we could call toLevenshteinAutomata() before det?
+            // this only happens if you have multiple paths anyway (e.g. synonyms)
+            return Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES);
+        }
+    }
+}
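A small sketch of the Levenshtein-automaton construction used in toLevenshteinAutomata above, carving off a non-fuzzy prefix before building the matcher (the term, prefix length, and edit distance are invented for illustration):

import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.LevenshteinAutomata;

class LevAutomatonSketch {
    // Accepts all strings within one edit of "searhc", except that the first
    // character is held fixed -- mirroring how the suggester skips nonFuzzyPrefix.
    static Automaton fuzzyMatcher() {
        String term = "searhc";
        int nonFuzzyPrefix = 1;
        int[] cp = term.codePoints().skip(nonFuzzyPrefix).toArray();
        LevenshteinAutomata lev = new LevenshteinAutomata(cp, Character.MAX_CODE_POINT, true);
        return lev.toAutomaton(1, term.substring(0, nonFuzzyPrefix));
    }
}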
@@ -22,7 +22,11 @@ package org.apache.lucene.search.vectorhighlight;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
@@ -30,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.List;
 
 /**
  *
@@ -64,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery {
         flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
     } else if (sourceQuery instanceof MultiPhraseQuery) {
         MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
-        convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
+        convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
     } else if (sourceQuery instanceof BlendedTermQuery) {
         final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
         flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
@@ -72,10 +75,10 @@ public class CustomFieldQuery extends FieldQuery {
         super.flatten(sourceQuery, reader, flatQueries, boost);
     }
 }
 
-    private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
+    private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
     if (currentPos == 0) {
-        // if we have more than 16 terms
+        // if we have more than 16 terms
         int numTerms = 0;
         for (Term[] currentPosTerm : terms) {
             numTerms += currentPosTerm.length;
@@ -83,7 +86,7 @@ public class CustomFieldQuery extends FieldQuery {
         if (numTerms > 16) {
             for (Term[] currentPosTerm : terms) {
                 for (Term term : currentPosTerm) {
-                    super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
+                    super.flatten(new TermQuery(term), reader, flatQueries, 1F);
                 }
             }
             return;
@@ -93,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery {
      * we walk all possible ways and for each path down the MPQ we create a PhraseQuery this is what FieldQuery supports.
      * It seems expensive but most queries will be pretty small.
      */
-    if (currentPos == terms.size()) {
+    if (currentPos == terms.length) {
         PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder();
         queryBuilder.setSlop(orig.getSlop());
         for (int i = 0; i < termsIdx.length; i++) {
-            queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
+            queryBuilder.add(terms[i][termsIdx[i]], pos[i]);
         }
         Query query = queryBuilder.build();
-        this.flatten(query, reader, flatQueries, orig.getBoost());
+        this.flatten(query, reader, flatQueries, 1F);
     } else {
-        Term[] t = terms.get(currentPos);
+        Term[] t = terms[currentPos];
         for (int i = 0; i < t.length; i++) {
             termsIdx[currentPos] = i;
             convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries);
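The recursion above walks every combination of per-position terms and flattens each path into a PhraseQuery. The same enumeration on plain strings, as a compact illustration (names and data invented):

import java.util.ArrayList;
import java.util.List;

class PathEnumerationSketch {
    // [[quick], [brown, red], [fox]] -> "quick brown fox", "quick red fox"
    static List<String> paths(String[][] positions) {
        List<String> out = new ArrayList<>();
        walk(positions, 0, new String[positions.length], out);
        return out;
    }

    private static void walk(String[][] positions, int pos, String[] chosen, List<String> out) {
        if (pos == positions.length) {
            out.add(String.join(" ", chosen)); // one complete path through the positions
            return;
        }
        for (String term : positions[pos]) {
            chosen[pos] = term;
            walk(positions, pos + 1, chosen, out);
        }
    }
}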
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.spatial.geopoint.search;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding;
+
+/** Implements a point distance range query on a GeoPoint field. This is based on
+ * {@code org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery} and is implemented using a
+ * {@code org.apache.lucene.search.BooleanClause.MUST_NOT} clause to exclude any points that fall within
+ * minRadiusMeters from the provided point.
+ * <p>
+ * NOTE: this query does not correctly support multi-value docs (see: https://issues.apache.org/jira/browse/LUCENE-7126)
+ * <br>
+ * TODO: remove this per ISSUE #17658
+ **/
+public final class XGeoPointDistanceRangeQuery extends GeoPointDistanceQuery {
+    /** minimum distance range (in meters) from lat, lon center location, maximum is inherited */
+    protected final double minRadiusMeters;
+
+    /**
+     * Constructs a query for all {@link org.apache.lucene.spatial.geopoint.document.GeoPointField} types within a minimum / maximum
+     * distance (in meters) range from a given point
+     */
+    public XGeoPointDistanceRangeQuery(final String field, final double centerLat, final double centerLon,
+                                       final double minRadiusMeters, final double maxRadiusMeters) {
+        this(field, TermEncoding.PREFIX, centerLat, centerLon, minRadiusMeters, maxRadiusMeters);
+    }
+
+    /**
+     * Constructs a query for all {@link org.apache.lucene.spatial.geopoint.document.GeoPointField} types within a minimum / maximum
+     * distance (in meters) range from a given point. Accepts an optional
+     * {@link org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding}
+     */
+    public XGeoPointDistanceRangeQuery(final String field, final TermEncoding termEncoding, final double centerLat, final double centerLon,
+                                       final double minRadiusMeters, final double maxRadius) {
+        super(field, termEncoding, centerLat, centerLon, maxRadius);
+        this.minRadiusMeters = minRadiusMeters;
+    }
+
+    @Override
+    public Query rewrite(IndexReader reader) {
+        Query q = super.rewrite(reader);
+        if (minRadiusMeters == 0.0) {
+            return q;
+        }
+
+        // add an exclusion query
+        BooleanQuery.Builder bqb = new BooleanQuery.Builder();
+
+        // create a new exclusion query
+        GeoPointDistanceQuery exclude = new GeoPointDistanceQuery(field, termEncoding, centerLat, centerLon, minRadiusMeters);
+        // full map search
+//        if (radiusMeters >= GeoProjectionUtils.SEMIMINOR_AXIS) {
+//            bqb.add(new BooleanClause(new GeoPointInBBoxQuery(this.field, -180.0, -90.0, 180.0, 90.0), BooleanClause.Occur.MUST));
+//        } else {
+        bqb.add(new BooleanClause(q, BooleanClause.Occur.MUST));
+//        }
+        bqb.add(new BooleanClause(exclude, BooleanClause.Occur.MUST_NOT));
+
+        return bqb.build();
+    }
+    @Override
+    public String toString(String field) {
+        final StringBuilder sb = new StringBuilder();
+        sb.append(getClass().getSimpleName());
+        sb.append(':');
+        if (!this.field.equals(field)) {
+            sb.append(" field=");
+            sb.append(this.field);
+            sb.append(':');
+        }
+        return sb.append(" Center: [")
+                .append(centerLat)
+                .append(',')
+                .append(centerLon)
+                .append(']')
+                .append(" From Distance: ")
+                .append(minRadiusMeters)
+                .append(" m")
+                .append(" To Distance: ")
+                .append(radiusMeters)
+                .append(" m")
+                .append(" Lower Left: [")
+                .append(minLat)
+                .append(',')
+                .append(minLon)
+                .append(']')
+                .append(" Upper Right: [")
+                .append(maxLat)
+                .append(',')
+                .append(maxLon)
+                .append("]")
+                .toString();
+    }
+
+    /** getter method for minimum distance */
+    public double getMinRadiusMeters() {
+        return this.minRadiusMeters;
+    }
+
+    /** getter method for maximum distance */
+    public double getMaxRadiusMeters() {
+        return this.radiusMeters;
+    }
+}
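The rewrite above expresses a min/max distance ring as "inside the outer circle MUST, inside the inner circle MUST_NOT". The same exclusion pattern in generic form (the two sub-queries are placeholders):

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;

class AnnulusSketch {
    // Ring = everything inside the outer circle minus everything inside the inner one.
    static Query ring(Query withinOuterRadius, Query withinInnerRadius) {
        return new BooleanQuery.Builder()
                .add(withinOuterRadius, BooleanClause.Occur.MUST)
                .add(withinInnerRadius, BooleanClause.Occur.MUST_NOT)
                .build();
    }
}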
@@ -45,6 +45,7 @@ public class Build {
     static {
         final String shortHash;
         final String date;
+        final boolean isSnapshot;
 
         Path path = getElasticsearchCodebase();
         if (path.toString().endsWith(".jar")) {
@@ -52,6 +53,7 @@ public class Build {
             Manifest manifest = jar.getManifest();
             shortHash = manifest.getMainAttributes().getValue("Change");
             date = manifest.getMainAttributes().getValue("Build-Date");
+            isSnapshot = "true".equals(manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Snapshot"));
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
@@ -59,6 +61,7 @@ public class Build {
             // not running from a jar (unit tests, IDE)
             shortHash = "Unknown";
             date = "Unknown";
+            isSnapshot = true;
         }
         if (shortHash == null) {
             throw new IllegalStateException("Error finding the build shortHash. " +
@@ -69,9 +72,11 @@ public class Build {
             "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug.");
         }
 
-        CURRENT = new Build(shortHash, date);
+        CURRENT = new Build(shortHash, date, isSnapshot);
     }
 
+    private final boolean isSnapshot;
+
     /**
      * Returns path to elasticsearch codebase path
     */
@@ -88,9 +93,10 @@ public class Build {
     private String shortHash;
     private String date;
 
-    Build(String shortHash, String date) {
+    Build(String shortHash, String date, boolean isSnapshot) {
         this.shortHash = shortHash;
         this.date = date;
+        this.isSnapshot = isSnapshot;
     }
 
     public String shortHash() {
@@ -104,16 +110,51 @@ public class Build {
     public static Build readBuild(StreamInput in) throws IOException {
         String hash = in.readString();
         String date = in.readString();
-        return new Build(hash, date);
+        boolean snapshot = in.readBoolean();
+        return new Build(hash, date, snapshot);
     }
 
     public static void writeBuild(Build build, StreamOutput out) throws IOException {
         out.writeString(build.shortHash());
         out.writeString(build.date());
+        out.writeBoolean(build.isSnapshot());
     }
 
+    public boolean isSnapshot() {
+        return isSnapshot;
+    }
+
     @Override
     public String toString() {
         return "[" + shortHash + "][" + date + "]";
     }
 
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        Build build = (Build) o;
+
+        if (isSnapshot != build.isSnapshot) {
+            return false;
+        }
+        if (!shortHash.equals(build.shortHash)) {
+            return false;
+        }
+        return date.equals(build.date);
+
+    }
+
+    @Override
+    public int hashCode() {
+        int result = (isSnapshot ? 1 : 0);
+        result = 31 * result + shortHash.hashCode();
+        result = 31 * result + date.hashCode();
+        return result;
+    }
 }
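readBuild and writeBuild above must stay mirror images: the snapshot flag is appended on both sides at once, so the wire format changes for every node in lockstep. A minimal analogy of that round-trip invariant using plain java.io streams instead of the Elasticsearch StreamInput/StreamOutput:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

class BuildWireSketch {
    // The reader must consume fields in exactly the order the writer emits them.
    static void write(DataOutput out, String hash, String date, boolean snapshot) throws IOException {
        out.writeUTF(hash);
        out.writeUTF(date);
        out.writeBoolean(snapshot);
    }

    static Object[] read(DataInput in) throws IOException {
        String hash = in.readUTF();
        String date = in.readUTF();
        boolean snapshot = in.readBoolean();
        return new Object[] {hash, date, snapshot};
    }
}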
@@ -19,10 +19,12 @@
 
 package org.elasticsearch;
 
+import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.support.LoggerMessageFormat;
+import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.Index;
@@ -30,9 +32,18 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 import static java.util.Collections.unmodifiableMap;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
 
 /**
  * A base class for all elasticsearch exceptions.
 */
@@ -43,6 +54,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
     public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
     private static final String INDEX_HEADER_KEY = "es.index";
+    private static final String INDEX_HEADER_KEY_UUID = "es.index_uuid";
     private static final String SHARD_HEADER_KEY = "es.shard";
     private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
     private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";
@@ -64,7 +76,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * The message can be parameterized using <code>{}</code> as placeholders for the given
     * arguments
     *
-     * @param msg the detail message
+     * @param msg  the detail message
     * @param args the arguments for the message
     */
    public ElasticsearchException(String msg, Object... args) {
@@ -196,7 +208,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * @param exType the exception type to look for
     * @return whether there is a nested exception of the specified type
     */
-    public boolean contains(Class exType) {
+    public boolean contains(Class<? extends Throwable> exType) {
        if (exType == null) {
            return false;
        }
@@ -246,7 +258,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Retruns <code>true</code> iff the given class is a registered for an exception to be read.
+     * Returns <code>true</code> iff the given class is registered for an exception to be read.
     */
    public static boolean isRegistered(Class<? extends Throwable> exception) {
        return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);
@@ -326,7 +338,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 
     private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException {
         if (values != null && values.isEmpty() == false) {
-            if(values.size() == 1) {
+            if (values.size() == 1) {
                 builder.field(key, values.get(0));
             } else {
                 builder.startArray(key);
@@ -361,18 +373,18 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions
     */
    public ElasticsearchException[] guessRootCauses() {
        final Throwable cause = getCause();
        if (cause != null && cause instanceof ElasticsearchException) {
            return ((ElasticsearchException) cause).guessRootCauses();
        }
-        return new ElasticsearchException[] {this};
+        return new ElasticsearchException[]{this};
    }

    /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions.
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions.
     * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array
     * is returned.
     */
@@ -381,7 +393,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
        if (ex instanceof ElasticsearchException) {
            return ((ElasticsearchException) ex).guessRootCauses();
        }
-        return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) {
+        return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
            @Override
            protected String getExceptionName() {
                return getExceptionName(getCause());
@@ -401,14 +413,15 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
         if (simpleName.startsWith("Elasticsearch")) {
             simpleName = simpleName.substring("Elasticsearch".length());
         }
-        return Strings.toUnderscoreCase(simpleName);
+        // TODO: do we really need to make the exception name in underscore casing?
+        return toUnderscoreCase(simpleName);
     }

     @Override
     public String toString() {
         StringBuilder builder = new StringBuilder();
         if (headers.containsKey(INDEX_HEADER_KEY)) {
-            builder.append('[').append(getIndex()).append(']');
+            builder.append(getIndex());
             if (headers.containsKey(SHARD_HEADER_KEY)) {
                 builder.append('[').append(getShardId()).append(']');
             }
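For reference, the mapping the underscore-casing produces (derived from the code above; the helper itself is shown near the end of this file's diff):

    // ElasticsearchParseException  -> "parse_exception"            ("Elasticsearch" prefix stripped first)
    // IndexNotFoundException       -> "index_not_found_exception"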
@@ -429,7 +442,7 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
         final String fileName = in.readOptionalString();
         final String methodName = in.readString();
         final int lineNumber = in.readVInt();
-        stackTrace[i] = new StackTraceElement(declaringClasss,methodName, fileName, lineNumber);
+        stackTrace[i] = new StackTraceElement(declaringClasss, methodName, fileName, lineNumber);
     }
     throwable.setStackTrace(stackTrace);
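The diff only shows the decode side; a hedged sketch of the matching encode side, mirroring the read order above (the declaring class is assumed to be read just before this hunk):

    for (StackTraceElement element : throwable.getStackTrace()) {
        out.writeString(element.getClassName());        // read back as declaringClasss
        out.writeOptionalString(element.getFileName()); // may be null, hence the optional variant
        out.writeString(element.getMethodName());
        out.writeVInt(element.getLineNumber());
    }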
@@ -460,157 +473,285 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
         return throwable;
     }

+    /**
+     * This is the list of Exceptions Elasticsearch can throw over the wire or save into a corruption marker. Each value in the enum is a
+     * single exception tying the Class to an id for use of the encode side and the id back to a constructor for use on the decode side. As
+     * such it's ok if the exceptions change names so long as their constructor can still read the exception. Each exception is listed
+     * in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in
+     * ExceptionSerializationTests.testIds.ids.
+     */
     enum ElasticsearchExceptionHandle {
-        // each exception gets an assigned id that must never change. While the exception name can
-        // change due to refactorings etc. like renaming we have to keep the ordinal <--> class mapping
-        // to deserialize the exception coming from another node or from an corruption marker on
-        // a corrupted index.
-        // these exceptions can be ordered and removed, but (repeating) the ids must never change
-        // to remove an exception, remove the enum value below, and mark the id as null in ExceptionSerializationTests.testIds.ids
-        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
-        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
-        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
-        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class, org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
-        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class, org.elasticsearch.ElasticsearchSecurityException::new, 4),
-        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
-        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class, org.elasticsearch.indices.IndexClosedException::new, 6),
-        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class, org.elasticsearch.http.BindHttpException::new, 7),
-        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class, org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
-        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class, org.elasticsearch.node.NodeClosedException::new, 9),
-        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
-        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class, org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
-        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class, org.elasticsearch.transport.ConnectTransportException::new, 12),
-        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class, org.elasticsearch.transport.NotSerializableTransportException::new, 13),
-        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
-        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class, org.elasticsearch.indices.IndexCreationException::new, 15),
-        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class, org.elasticsearch.index.IndexNotFoundException::new, 16),
-        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
-        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
-        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
-        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
-        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
+        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
+                org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
+        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,
+                org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
+        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,
+                org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
+        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,
+                org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
+        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,
+                org.elasticsearch.ElasticsearchSecurityException::new, 4),
+        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,
+                org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
+        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,
+                org.elasticsearch.indices.IndexClosedException::new, 6),
+        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,
+                org.elasticsearch.http.BindHttpException::new, 7),
+        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,
+                org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
+        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,
+                org.elasticsearch.node.NodeClosedException::new, 9),
+        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,
+                org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
+        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,
+                org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
+        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,
+                org.elasticsearch.transport.ConnectTransportException::new, 12),
+        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,
+                org.elasticsearch.transport.NotSerializableTransportException::new, 13),
+        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,
+                org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
+        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,
+                org.elasticsearch.indices.IndexCreationException::new, 15),
+        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,
+                org.elasticsearch.index.IndexNotFoundException::new, 16),
+        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,
+                org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
+        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,
+                org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
+        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,
+                org.elasticsearch.ResourceNotFoundException::new, 19),
+        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,
+                org.elasticsearch.transport.ActionTransportException::new, 20),
+        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,
+                org.elasticsearch.ElasticsearchGenerationException::new, 21),
         // 22 was CreateFailedEngineException
-        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
-        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
+        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,
+                org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
+        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
+                org.elasticsearch.search.SearchContextMissingException::new, 24),
         SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
-        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
-        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
-        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
-        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29),
-        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, org.elasticsearch.snapshots.SnapshotException::new, 30),
-        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class, org.elasticsearch.indices.InvalidAliasNameException::new, 31),
-        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class, org.elasticsearch.indices.InvalidIndexNameException::new, 32),
-        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
-        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class, org.elasticsearch.transport.TransportException::new, 34),
-        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class, org.elasticsearch.ElasticsearchParseException::new, 35),
-        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class, org.elasticsearch.search.SearchException::new, 36),
-        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class, org.elasticsearch.index.mapper.MapperException::new, 37),
-        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class, org.elasticsearch.indices.InvalidTypeNameException::new, 38),
-        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class, org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
+        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
+                org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
+        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
+                org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
+        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
+                org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
+        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
+                org.elasticsearch.index.engine.DocumentMissingException::new, 29),
+        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,
+                org.elasticsearch.snapshots.SnapshotException::new, 30),
+        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,
+                org.elasticsearch.indices.InvalidAliasNameException::new, 31),
+        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,
+                org.elasticsearch.indices.InvalidIndexNameException::new, 32),
+        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,
+                org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
+        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,
+                org.elasticsearch.transport.TransportException::new, 34),
+        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,
+                org.elasticsearch.ElasticsearchParseException::new, 35),
+        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,
+                org.elasticsearch.search.SearchException::new, 36),
+        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,
+                org.elasticsearch.index.mapper.MapperException::new, 37),
+        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,
+                org.elasticsearch.indices.InvalidTypeNameException::new, 38),
+        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,
+                org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
         PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),
-        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class, org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
-        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
-        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class, org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
-        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class, org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
-        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class, org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
-        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class, org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
-        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
-        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class, org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
-        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class, org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
-        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
-        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
-        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
+        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,
+                org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
+        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,
+                org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
+        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,
+                org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
+        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,
+                org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
+        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,
+                org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
+        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
+                org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
+        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
+                org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
+        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
+                org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
+        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
+                org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
+        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
+                org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
+        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
+                org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
+        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
+                org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
         // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
         NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
-        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
-        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
-        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class, org.elasticsearch.transport.SendRequestTransportException::new, 58),
-        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
-        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
-        ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
-        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
-        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
+        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,
+                org.elasticsearch.common.settings.SettingsException::new, 56),
+        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,
+                org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
+        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,
+                org.elasticsearch.transport.SendRequestTransportException::new, 58),
+        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
+                org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
+        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
+                org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
+        // 61 used to be for RoutingValidationException
+        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
+                org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
+        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
+                org.elasticsearch.indices.AliasFilterParsingException::new, 63),
         // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
         GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
-        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
+        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
+                org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
         HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
-        ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class, org.elasticsearch.ElasticsearchException::new, 68),
-        SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class, org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
-        PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class, org.elasticsearch.action.PrimaryMissingActionException::new, 70),
+        ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,
+                org.elasticsearch.ElasticsearchException::new, 68),
+        SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,
+                org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
+        PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,
+                org.elasticsearch.action.PrimaryMissingActionException::new, 70),
         FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),
         SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),
-        CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
-        BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class, org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
-        INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
-        RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class, org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
-        UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
-        TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class, org.elasticsearch.action.TimestampParsingException::new, 78),
-        ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79),
-        INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
-        INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
-        REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, org.elasticsearch.repositories.RepositoryException::new, 82),
-        RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
-        NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
-        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
-        AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
+        CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,
+                org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
+        BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,
+                org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
+        INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,
+                org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
+        RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,
+                org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
+        UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,
+                org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
+        TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,
+                org.elasticsearch.action.TimestampParsingException::new, 78),
+        ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
+                org.elasticsearch.action.RoutingMissingException::new, 79),
+        INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
+                org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
+        INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
+                org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
+        REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,
+                org.elasticsearch.repositories.RepositoryException::new, 82),
+        RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,
+                org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
+        NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
+                org.elasticsearch.transport.NodeDisconnectedException::new, 84),
+        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
+                org.elasticsearch.index.AlreadyExpiredException::new, 85),
+        AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
+                org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
         // 87 used to be for MergeMappingException
-        INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
-        PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
-        REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
-        AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
-        DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
-        INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93),
-        NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
-        INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
-        ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
-        INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
-        INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
-        SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class, org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
-        ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class, org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
-        TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class, org.elasticsearch.transport.TransportSerializationException::new, 102),
-        REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class, org.elasticsearch.transport.RemoteTransportException::new, 103),
-        ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class, org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
-        ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class, org.elasticsearch.cluster.routing.RoutingException::new, 105),
-        INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class, org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
-        REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107),
-        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.index.percolator.PercolatorException::new, 108),
-        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
-        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
-        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class, org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
-        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class, org.elasticsearch.transport.BindTransportException::new, 112),
-        ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
-        INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class, org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
-        TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class, org.elasticsearch.index.translog.TranslogException::new, 115),
-        PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
-        RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
-        ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.ElasticsearchTimeoutException::new, 118),
-        QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
-        REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class, org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
-        INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
-        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
-        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124),
-        HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
-        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126),
-        SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, org.elasticsearch.search.SearchContextException::new, 127),
-        SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class, org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
-        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class, org.elasticsearch.index.engine.EngineClosedException::new, 129),
-        NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class, org.elasticsearch.action.NoShardAvailableActionException::new, 130),
-        UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class, org.elasticsearch.action.UnavailableShardsException::new, 131),
-        FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class, org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
-        CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class, org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
-        NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class, org.elasticsearch.transport.NodeNotConnectedException::new, 134),
-        STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
-        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
-        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
-        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
-        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
+        INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
+        PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
+        REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
+        AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
+        DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
+        INDEX_WARMER_MISSING_EXCEPTION(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, org.elasticsearch.search.warmer.IndexWarmerMissingException::new, 93),
+        NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
+        INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
+        ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
+        INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
+        INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
+        SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class, org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
+        ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class, org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
+        TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class, org.elasticsearch.transport.TransportSerializationException::new, 102),
+        REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class, org.elasticsearch.transport.RemoteTransportException::new, 103),
+        ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class, org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
+        ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class, org.elasticsearch.cluster.routing.RoutingException::new, 105),
+        INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class, org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
+        REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107),
+        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.index.percolator.PercolatorException::new, 108),
+        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
+        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
+        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class, org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
+        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class, org.elasticsearch.transport.BindTransportException::new, 112),
+        ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
+        INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class, org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
+        TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class, org.elasticsearch.index.translog.TranslogException::new, 115),
+        PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
+        RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
+        ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.ElasticsearchTimeoutException::new, 118),
+        QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
+        REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class, org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
+        INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
+        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
+        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124),
+        HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
+        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126),
+        SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, org.elasticsearch.search.SearchContextException::new, 127),
+        SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class, org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
+        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class, org.elasticsearch.index.engine.EngineClosedException::new, 129),
+        NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class, org.elasticsearch.action.NoShardAvailableActionException::new, 130),
+        UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class, org.elasticsearch.action.UnavailableShardsException::new, 131),
+        FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class, org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
+        CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class, org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
+        NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class, org.elasticsearch.transport.NodeNotConnectedException::new, 134),
+        STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
+        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
+        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
+        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
+        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
+        INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
+                org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
+        REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
+                org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
+        AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,
+                org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
+        DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,
+                org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
+        // 93 used to be for IndexWarmerMissingException
+        NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,
+                org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
+        INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,
+                org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
+        ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,
+                org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
+        INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,
+                org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
+        INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,
+                org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
+        SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,
+                org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
+        ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,
+                org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
+        TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,
+                org.elasticsearch.transport.TransportSerializationException::new, 102),
+        REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,
+                org.elasticsearch.transport.RemoteTransportException::new, 103),
+        ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,
+                org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
+        ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,
+                org.elasticsearch.cluster.routing.RoutingException::new, 105),
+        INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,
+                org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
+        REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
+                org.elasticsearch.repositories.RepositoryMissingException::new, 107),
+        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
+                org.elasticsearch.index.percolator.PercolatorException::new, 108),
+        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
+                org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
+        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
+                org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
+        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
+                org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
+        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
+                org.elasticsearch.transport.BindTransportException::new, 112),
+        ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class,
+                org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
+        INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,
+                org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
+        TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,
+                org.elasticsearch.index.translog.TranslogException::new, 115),
+        PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,
+                org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
+        RETRY_ON_PRIMARY_EXCEPTION(ReplicationOperation.RetryOnPrimaryException.class,
+                ReplicationOperation.RetryOnPrimaryException::new, 117),
+        ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,
+                org.elasticsearch.ElasticsearchTimeoutException::new, 118),
+        QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,
+                org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
+        REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,
+                org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
+        INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,
+                org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
+        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
+                org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
+        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
+                org.elasticsearch.script.Script.ScriptParseException::new, 124),
+        HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
+                org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
+        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
+                org.elasticsearch.index.mapper.MapperParsingException::new, 126),
+        SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
+                org.elasticsearch.search.SearchContextException::new, 127),
+        SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
+                org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
+        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
+                org.elasticsearch.index.engine.EngineClosedException::new, 129),
+        NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
+                org.elasticsearch.action.NoShardAvailableActionException::new, 130),
+        UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
+                org.elasticsearch.action.UnavailableShardsException::new, 131),
+        FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,
+                org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
+        CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,
+                org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
+        NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,
+                org.elasticsearch.transport.NodeNotConnectedException::new, 134),
+        STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,
+                org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
+        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,
+                org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
+        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,
+                org.elasticsearch.indices.TypeMissingException::new, 137),
+        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,
+                org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
+        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
+                org.elasticsearch.index.query.QueryShardException::new, 141),
+        NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
+                ShardStateAction.NoLongerPrimaryShardException::new, 142);

         final Class<? extends ElasticsearchException> exceptionClass;
         final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
         final int id;

-        ElasticsearchExceptionHandle(Class<? extends ElasticsearchException> exceptionClass, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor, int id) {
+        <E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
+                FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {
+            // We need the exceptionClass because you can't dig it out of the constructor reliably.
             this.exceptionClass = exceptionClass;
             this.constructor = constructor;
             this.id = id;
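The point of binding both constructor parameters to a single type variable E is that a mismatched class/constructor pair now fails at compile time; a hedged illustration (the entry below is made up):

    // Accepted by the old wildcard signature, rejected by <E extends ElasticsearchException>:
    //   BAD_HANDLE(org.elasticsearch.indices.TypeMissingException.class,
    //           org.elasticsearch.index.IndexNotFoundException::new, 999)   // class and constructor disagree on E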
@@ -618,17 +759,17 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
     }

     static {
-        final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> exceptions = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e));
-        final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> idToSupplier = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor));
-
-        ID_TO_SUPPLIER = Collections.unmodifiableMap(idToSupplier);
-        CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
+        ID_TO_SUPPLIER = unmodifiableMap(Arrays
+                .stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)));
+        CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = unmodifiableMap(Arrays
+                .stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e)));
     }

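Taken together, the two maps support both directions of the wire format; a decode-side sketch using only names shown in this diff (readException itself is illustrative, not the exact method in this class):

    static ElasticsearchException readException(StreamInput in) throws IOException {
        int id = in.readVInt();
        FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> ctor = ID_TO_SUPPLIER.get(id);
        if (ctor == null) {
            throw new IllegalStateException("unknown exception for id: " + id);
        }
        return ctor.apply(in); // the registered constructor reads the rest of the stream
    }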
-    public String getIndex() {
+    public Index getIndex() {
         List<String> index = getHeader(INDEX_HEADER_KEY);
         if (index != null && index.isEmpty() == false) {
-            return index.get(0);
+            List<String> index_uuid = getHeader(INDEX_HEADER_KEY_UUID);
+            return new Index(index.get(0), index_uuid.get(0));
         }

         return null;
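Callers of getIndex() now receive the name and UUID together; a short usage sketch:

    Index index = e.getIndex();          // null when no index header was set
    if (index != null) {
        String name = index.getName();
        String uuid = index.getUUID();   // INDEX_UUID_NA_VALUE when the UUID is unknown
    }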
@@ -645,22 +786,28 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
     public void setIndex(Index index) {
         if (index != null) {
             addHeader(INDEX_HEADER_KEY, index.getName());
+            addHeader(INDEX_HEADER_KEY_UUID, index.getUUID());
         }
     }

     public void setIndex(String index) {
         if (index != null) {
-            addHeader(INDEX_HEADER_KEY, index);
+            setIndex(new Index(index, INDEX_UUID_NA_VALUE));
         }
     }

     public void setShard(ShardId shardId) {
         if (shardId != null) {
-            addHeader(INDEX_HEADER_KEY, shardId.getIndex());
+            setIndex(shardId.getIndex());
             addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));
         }
     }

+    public void setShard(String index, int shardId) {
+        setIndex(index);
+        addHeader(SHARD_HEADER_KEY, Integer.toString(shardId));
+    }
+
     public void setResources(String type, String... id) {
         assert type != null;
         addHeader(RESOURCE_HEADER_ID_KEY, id);
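The new String overload keeps all three headers consistent for callers that only know the index name; a sketch:

    ElasticsearchException e = new ElasticsearchException("shard failed");
    e.setShard("my-index", 3);   // sets INDEX_HEADER_KEY, INDEX_HEADER_KEY_UUID (as N/A) and SHARD_HEADER_KEY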
@@ -685,9 +832,10 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
         final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
         builder.field("root_cause");
         builder.startArray();
-        for (ElasticsearchException rootCause : rootCauses){
+        for (ElasticsearchException rootCause : rootCauses) {
             builder.startObject();
-            rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
+            rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(
+                    Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
             builder.endObject();
         }
         builder.endArray();
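For reference, the array built here renders roughly like this (shape only; the fields depend on the concrete exception):

    // "root_cause" : [
    //   { "type" : "index_not_found_exception", "reason" : "no such index", ... }
    // ]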
@@ -698,4 +846,39 @@ public class ElasticsearchException extends RuntimeException implements ToXContent
     interface FunctionThatThrowsIOException<T, R> {
         R apply(T t) throws IOException;
     }
+
+    // lower cases and adds underscores to transitions in a name
+    private static String toUnderscoreCase(String value) {
+        StringBuilder sb = new StringBuilder();
+        boolean changed = false;
+        for (int i = 0; i < value.length(); i++) {
+            char c = value.charAt(i);
+            if (Character.isUpperCase(c)) {
+                if (!changed) {
+                    // copy it over here
+                    for (int j = 0; j < i; j++) {
+                        sb.append(value.charAt(j));
+                    }
+                    changed = true;
+                    if (i == 0) {
+                        sb.append(Character.toLowerCase(c));
+                    } else {
+                        sb.append('_');
+                        sb.append(Character.toLowerCase(c));
+                    }
+                } else {
+                    sb.append('_');
+                    sb.append(Character.toLowerCase(c));
+                }
+            } else {
+                if (changed) {
+                    sb.append(c);
+                }
+            }
+        }
+        if (!changed) {
+            return value;
+        }
+        return sb.toString();
+    }
 }
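Expected behavior of the new helper, derived from the algorithm above:

    // toUnderscoreCase("QueryShardException") -> "query_shard_exception"
    // toUnderscoreCase("Elasticsearch")       -> "elasticsearch"     (leading capital, no separator)
    // toUnderscoreCase("already_lower")       -> returned unchanged  (no uppercase transition)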
@@ -26,6 +26,7 @@ import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
@@ -243,7 +244,12 @@ public final class ExceptionsHelper {

         public GroupBy(Throwable t) {
             if (t instanceof ElasticsearchException) {
-                index = ((ElasticsearchException) t).getIndex();
+                final Index index = ((ElasticsearchException) t).getIndex();
+                if (index != null) {
+                    this.index = index.getName();
+                } else {
+                    this.index = null;
+                }
             } else {
                 index = null;
             }
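The explicit guard is needed because getIndex() now returns an Index object or null; it could equally be written as a ternary, though the commit keeps the spelled-out form:

    // equivalent, not what the commit uses:
    this.index = index == null ? null : index.getName();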
@@ -22,15 +22,15 @@ package org.elasticsearch;
 import java.security.BasicPermission;

 /**
  * Elasticsearch-specific permission to check before entering
  * {@code AccessController.doPrivileged()} blocks.
  * <p>
  * We try to avoid these blocks in our code and keep security simple,
  * but we need them for a few special places to contain hacks for third
  * party code, or dangerous things used by scripting engines.
  * <p>
  * All normal code has this permission, but checking this before truncating the stack
  * prevents unprivileged code (e.g. scripts), which do not have it, from gaining elevated
  * privileges.
  * <p>
  * In other words, don't do this:
@@ -57,9 +57,6 @@ import java.security.BasicPermission;
  * </code></pre>
  */
 public final class SpecialPermission extends BasicPermission {
-
-    private static final long serialVersionUID = -4129500096157408168L;
-
     /**
      * Creates a new SpecialPermision object.
      */
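The check the javadoc describes follows the usual Elasticsearch pattern; a sketch (the privileged body is a placeholder):

    SecurityManager sm = System.getSecurityManager();
    if (sm != null) {
        sm.checkPermission(new SpecialPermission());  // fails for unprivileged callers such as scripts
    }
    AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
        // ... code that genuinely needs elevated privileges ...
        return null;
    });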
@@ -68,11 +65,11 @@ public final class SpecialPermission extends BasicPermission {
         // but let's just keep it simple if we can.
         super("*");
     }

     /**
      * Creates a new SpecialPermission object.
      * This constructor exists for use by the {@code Policy} object to instantiate new Permission objects.
      *
      * @param name ignored
      * @param actions ignored
      */
@@ -25,7 +25,6 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -35,252 +34,50 @@ import java.io.IOException;
  */
 @SuppressWarnings("deprecation")
 public class Version {

     // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
     // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
     // the (internal) format of the id is there so we can easily do after/before checks on the id

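A worked decoding of the XXYYZZAA scheme, using ids that appear in the constants below (the helper itself is illustrative only):

    // 180099 -> major 0, minor 18, revision 0, AA 99  => 0.18.0 GA release
    // 190051 -> major 0, minor 19, revision 0, AA 51  => 0.19.0 RC1
    static int[] decodeId(int id) {
        return new int[] { id / 1000000, (id / 10000) % 100, (id / 100) % 100, id % 100 };
    }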
    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;

    public static final int V_0_18_0_ID = /*00*/180099;
    public static final Version V_0_18_0 = new Version(V_0_18_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_1_ID = /*00*/180199;
    public static final Version V_0_18_1 = new Version(V_0_18_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_2_ID = /*00*/180299;
    public static final Version V_0_18_2 = new Version(V_0_18_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_3_ID = /*00*/180399;
    public static final Version V_0_18_3 = new Version(V_0_18_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_4_ID = /*00*/180499;
    public static final Version V_0_18_4 = new Version(V_0_18_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_5_ID = /*00*/180599;
    public static final Version V_0_18_5 = new Version(V_0_18_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_6_ID = /*00*/180699;
    public static final Version V_0_18_6 = new Version(V_0_18_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_7_ID = /*00*/180799;
    public static final Version V_0_18_7 = new Version(V_0_18_7_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_8_ID = /*00*/180899;
    public static final Version V_0_18_8 = new Version(V_0_18_8_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC1_ID = /*00*/190051;
    public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC2_ID = /*00*/190052;
    public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC3_ID = /*00*/190053;
    public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_ID = /*00*/190099;
    public static final Version V_0_19_0 = new Version(V_0_19_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_1_ID = /*00*/190199;
    public static final Version V_0_19_1 = new Version(V_0_19_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_2_ID = /*00*/190299;
    public static final Version V_0_19_2 = new Version(V_0_19_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_3_ID = /*00*/190399;
    public static final Version V_0_19_3 = new Version(V_0_19_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_4_ID = /*00*/190499;
    public static final Version V_0_19_4 = new Version(V_0_19_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_5_ID = /*00*/190599;
    public static final Version V_0_19_5 = new Version(V_0_19_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_6_ID = /*00*/190699;
    public static final Version V_0_19_6 = new Version(V_0_19_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_7_ID = /*00*/190799;
    public static final Version V_0_19_7 = new Version(V_0_19_7_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_8_ID = /*00*/190899;
    public static final Version V_0_19_8 = new Version(V_0_19_8_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_9_ID = /*00*/190999;
    public static final Version V_0_19_9 = new Version(V_0_19_9_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_10_ID = /*00*/191099;
    public static final Version V_0_19_10 = new Version(V_0_19_10_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_11_ID = /*00*/191199;
    public static final Version V_0_19_11 = new Version(V_0_19_11_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_12_ID = /*00*/191299;
    public static final Version V_0_19_12 = new Version(V_0_19_12_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_13_ID = /*00*/191399;
    public static final Version V_0_19_13 = new Version(V_0_19_13_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_20_0_RC1_ID = /*00*/200051;
    public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_0_ID = /*00*/200099;
    public static final Version V_0_20_0 = new Version(V_0_20_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_1_ID = /*00*/200199;
    public static final Version V_0_20_1 = new Version(V_0_20_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_2_ID = /*00*/200299;
    public static final Version V_0_20_2 = new Version(V_0_20_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_3_ID = /*00*/200399;
    public static final Version V_0_20_3 = new Version(V_0_20_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_4_ID = /*00*/200499;
    public static final Version V_0_20_4 = new Version(V_0_20_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_5_ID = /*00*/200599;
    public static final Version V_0_20_5 = new Version(V_0_20_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_6_ID = /*00*/200699;
    public static final Version V_0_20_6 = new Version(V_0_20_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_7_ID = /*00*/200799;
    public static final Version V_0_20_7 = new Version(V_0_20_7_ID, true, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_90_0_Beta1_ID = /*00*/900001;
    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC1_ID = /*00*/900051;
    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC2_ID = /*00*/900052;
    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_0_ID = /*00*/900099;
    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_1_ID = /*00*/900199;
    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_2_ID = /*00*/900299;
    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_3_ID = /*00*/900399;
    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_4_ID = /*00*/900499;
    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_5_ID = /*00*/900599;
    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_6_ID = /*00*/900699;
    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_7_ID = /*00*/900799;
    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_8_ID = /*00*/900899;
    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_9_ID = /*00*/900999;
    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_10_ID = /*00*/901099;
    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_11_ID = /*00*/901199;
    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_12_ID = /*00*/901299;
    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_13_ID = /*00*/901399;
    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_14_ID = /*00*/901499;
    public static final Version V_0_90_14 = new Version(V_0_90_14_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);

    public static final int V_1_0_0_Beta1_ID = 1000001;
    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_1_0_0_Beta2_ID = 1000002;
    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC1_ID = 1000051;
    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC2_ID = 1000052;
    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_ID = 1000099;
    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_1_ID = 1000199;
    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_2_ID = 1000299;
    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_3_ID = 1000399;
    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_4_ID = 1000499;
    public static final Version V_1_0_4 = new Version(V_1_0_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_1_0_ID = 1010099;
    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_1_ID = 1010199;
    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_2_ID = 1010299;
    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_2_0_ID = 1020099;
    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_1_ID = 1020199;
    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_2_ID = 1020299;
    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_3_ID = 1020399;
    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_4_ID = 1020499;
    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_5_ID = 1020599;
    public static final Version V_1_2_5 = new Version(V_1_2_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_3_0_ID = 1030099;
    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_1_ID = 1030199;
    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_2_ID = 1030299;
    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_3_ID = 1030399;
    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_4_ID = 1030499;
    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_5_ID = 1030599;
    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_6_ID = 1030699;
    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_7_ID = 1030799;
    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_8_ID = 1030899;
    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_9_ID = 1030999;
    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_10_ID = /*00*/1031099;
    public static final Version V_1_3_10 = new Version(V_1_3_10_ID, true, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_4_0_Beta1_ID = 1040001;
    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_1);
    public static final int V_1_4_0_ID = 1040099;
    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_1_ID = 1040199;
    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_2_ID = 1040299;
    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_3_ID = 1040399;
    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_4_ID = 1040499;
    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_5_ID = 1040599;
    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_4_6_ID = 1040699;
    public static final Version V_1_4_6 = new Version(V_1_4_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_0_ID = 1050099;
    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_1_ID = 1050199;
    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_2_ID = 1050299;
    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_3_ID = 1050399;
    public static final Version V_1_5_3 = new Version(V_1_5_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_0_ID = 1060099;
    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_1_ID = 1060199;
    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_2_ID = 1060299;
    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_3_ID = 1060399;
    public static final Version V_1_6_3 = new Version(V_1_6_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_0_ID = 1070099;
    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_1_ID = 1070199;
    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_2_ID = 1070299;
    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_3_ID = 1070399;
    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_4_ID = 1070499;
    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);

    /*
     * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is the alpha/beta/rc
     * indicator. AA values below 25 are alpha builds (since 5.0), values from 25 up to but below 50 are beta builds, and values
     * below 99 are RC builds, with 99 indicating a release. The (internal) format of the id is there so we can easily do
     * after/before checks on the id.
     */
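    // Illustration (not part of this diff, derived from the scheme above and the constants below):
    //   2.0.0-beta1  -> 2000001 (pre-5.0 betas start at AA = 1)
    //   5.0.0-alpha1 -> 5000001 (from 5.0 on, alphas occupy AA = 1..24)
    //   a hypothetical 5.0.0-beta1 would be offset to AA = 25 + 1 = 26 -> 5000026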
    public static final int V_2_0_0_beta1_ID = 2000001;
    public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_0_beta2_ID = 2000002;
    public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_0_rc1_ID = 2000051;
    public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_0_ID = 2000099;
    public static final Version V_2_0_0 = new Version(V_2_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_1_ID = 2000199;
    public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_2_ID = 2000299;
    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_1_0_ID = 2010099;
    public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_1_1_ID = 2010199;
    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_1_2_ID = 2010299;
    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_2_0_ID = 2020099;
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final int V_3_0_0_ID = 3000099;
    public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final Version CURRENT = V_3_0_0;
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_2_1_ID = 2020199;
    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_2_2_ID = 2020299;
    public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_1_ID = 2030199;
    public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_2_ID = 2030299;
    public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_5_0_0_alpha1_ID = 5000001;
    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final int V_5_0_0_alpha2_ID = 5000002;
    public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final Version CURRENT = V_5_0_0_alpha2;

    static {
        assert CURRENT.luceneVersion.equals(Lucene.VERSION) : "Version must be upgraded to [" + Lucene.VERSION + "] is still set to [" + CURRENT.luceneVersion + "]";
        assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
                + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]";
    }

    public static Version readVersion(StreamInput in) throws IOException {
@ -289,10 +86,24 @@ public class Version {

    public static Version fromId(int id) {
        switch (id) {
            case V_3_0_0_ID:
                return V_3_0_0;
            case V_5_0_0_alpha2_ID:
                return V_5_0_0_alpha2;
            case V_5_0_0_alpha1_ID:
                return V_5_0_0_alpha1;
            case V_2_3_2_ID:
                return V_2_3_2;
            case V_2_3_1_ID:
                return V_2_3_1;
            case V_2_3_0_ID:
                return V_2_3_0;
            case V_2_2_2_ID:
                return V_2_2_2;
            case V_2_2_1_ID:
                return V_2_2_1;
            case V_2_2_0_ID:
                return V_2_2_0;
            case V_2_1_2_ID:
                return V_2_1_2;
            case V_2_1_1_ID:
                return V_2_1_1;
            case V_2_1_0_ID:

@ -309,230 +120,23 @@ public class Version {
                return V_2_0_0_beta2;
            case V_2_0_0_beta1_ID:
                return V_2_0_0_beta1;
            case V_1_7_4_ID:
                return V_1_7_4;
            case V_1_7_3_ID:
                return V_1_7_3;
            case V_1_7_2_ID:
                return V_1_7_2;
            case V_1_7_1_ID:
                return V_1_7_1;
            case V_1_7_0_ID:
                return V_1_7_0;
            case V_1_6_3_ID:
                return V_1_6_3;
            case V_1_6_2_ID:
                return V_1_6_2;
            case V_1_6_1_ID:
                return V_1_6_1;
            case V_1_6_0_ID:
                return V_1_6_0;
            case V_1_5_3_ID:
                return V_1_5_3;
            case V_1_5_2_ID:
                return V_1_5_2;
            case V_1_5_1_ID:
                return V_1_5_1;
            case V_1_5_0_ID:
                return V_1_5_0;
            case V_1_4_6_ID:
                return V_1_4_6;
            case V_1_4_5_ID:
                return V_1_4_5;
            case V_1_4_4_ID:
                return V_1_4_4;
            case V_1_4_3_ID:
                return V_1_4_3;
            case V_1_4_2_ID:
                return V_1_4_2;
            case V_1_4_1_ID:
                return V_1_4_1;
            case V_1_4_0_ID:
                return V_1_4_0;
            case V_1_4_0_Beta1_ID:
                return V_1_4_0_Beta1;
            case V_1_3_10_ID:
                return V_1_3_10;
            case V_1_3_9_ID:
                return V_1_3_9;
            case V_1_3_8_ID:
                return V_1_3_8;
            case V_1_3_7_ID:
                return V_1_3_7;
            case V_1_3_6_ID:
                return V_1_3_6;
            case V_1_3_5_ID:
                return V_1_3_5;
            case V_1_3_4_ID:
                return V_1_3_4;
            case V_1_3_3_ID:
                return V_1_3_3;
            case V_1_3_2_ID:
                return V_1_3_2;
            case V_1_3_1_ID:
                return V_1_3_1;
            case V_1_3_0_ID:
                return V_1_3_0;
            case V_1_2_5_ID:
                return V_1_2_5;
            case V_1_2_4_ID:
                return V_1_2_4;
            case V_1_2_3_ID:
                return V_1_2_3;
            case V_1_2_2_ID:
                return V_1_2_2;
            case V_1_2_1_ID:
                return V_1_2_1;
            case V_1_2_0_ID:
                return V_1_2_0;
            case V_1_1_2_ID:
                return V_1_1_2;
            case V_1_1_1_ID:
                return V_1_1_1;
            case V_1_1_0_ID:
                return V_1_1_0;
            case V_1_0_4_ID:
                return V_1_0_4;
            case V_1_0_3_ID:
                return V_1_0_3;
            case V_1_0_2_ID:
                return V_1_0_2;
            case V_1_0_1_ID:
                return V_1_0_1;
            case V_1_0_0_ID:
                return V_1_0_0;
            case V_1_0_0_RC2_ID:
                return V_1_0_0_RC2;
            case V_1_0_0_RC1_ID:
                return V_1_0_0_RC1;
            case V_1_0_0_Beta2_ID:
                return V_1_0_0_Beta2;
            case V_1_0_0_Beta1_ID:
                return V_1_0_0_Beta1;
            case V_0_90_14_ID:
                return V_0_90_14;
            case V_0_90_13_ID:
                return V_0_90_13;
            case V_0_90_12_ID:
                return V_0_90_12;
            case V_0_90_11_ID:
                return V_0_90_11;
            case V_0_90_10_ID:
                return V_0_90_10;
            case V_0_90_9_ID:
                return V_0_90_9;
            case V_0_90_8_ID:
                return V_0_90_8;
            case V_0_90_7_ID:
                return V_0_90_7;
            case V_0_90_6_ID:
                return V_0_90_6;
            case V_0_90_5_ID:
                return V_0_90_5;
            case V_0_90_4_ID:
                return V_0_90_4;
            case V_0_90_3_ID:
                return V_0_90_3;
            case V_0_90_2_ID:
                return V_0_90_2;
            case V_0_90_1_ID:
                return V_0_90_1;
            case V_0_90_0_ID:
                return V_0_90_0;
            case V_0_90_0_RC2_ID:
                return V_0_90_0_RC2;
            case V_0_90_0_RC1_ID:
                return V_0_90_0_RC1;
            case V_0_90_0_Beta1_ID:
                return V_0_90_0_Beta1;

            case V_0_20_7_ID:
                return V_0_20_7;
            case V_0_20_6_ID:
                return V_0_20_6;
            case V_0_20_5_ID:
                return V_0_20_5;
            case V_0_20_4_ID:
                return V_0_20_4;
            case V_0_20_3_ID:
                return V_0_20_3;
            case V_0_20_2_ID:
                return V_0_20_2;
            case V_0_20_1_ID:
                return V_0_20_1;
            case V_0_20_0_ID:
                return V_0_20_0;
            case V_0_20_0_RC1_ID:
                return V_0_20_0_RC1;

            case V_0_19_0_RC1_ID:
                return V_0_19_0_RC1;
            case V_0_19_0_RC2_ID:
                return V_0_19_0_RC2;
            case V_0_19_0_RC3_ID:
                return V_0_19_0_RC3;
            case V_0_19_0_ID:
                return V_0_19_0;
            case V_0_19_1_ID:
                return V_0_19_1;
            case V_0_19_2_ID:
                return V_0_19_2;
            case V_0_19_3_ID:
                return V_0_19_3;
            case V_0_19_4_ID:
                return V_0_19_4;
            case V_0_19_5_ID:
                return V_0_19_5;
            case V_0_19_6_ID:
                return V_0_19_6;
            case V_0_19_7_ID:
                return V_0_19_7;
            case V_0_19_8_ID:
                return V_0_19_8;
            case V_0_19_9_ID:
                return V_0_19_9;
            case V_0_19_10_ID:
                return V_0_19_10;
            case V_0_19_11_ID:
                return V_0_19_11;
            case V_0_19_12_ID:
                return V_0_19_12;
            case V_0_19_13_ID:
                return V_0_19_13;

            case V_0_18_0_ID:
                return V_0_18_0;
            case V_0_18_1_ID:
                return V_0_18_1;
            case V_0_18_2_ID:
                return V_0_18_2;
            case V_0_18_3_ID:
                return V_0_18_3;
            case V_0_18_4_ID:
                return V_0_18_4;
            case V_0_18_5_ID:
                return V_0_18_5;
            case V_0_18_6_ID:
                return V_0_18_6;
            case V_0_18_7_ID:
                return V_0_18_7;
            case V_0_18_8_ID:
                return V_0_18_8;

            default:
                return new Version(id, false, Lucene.VERSION);
                return new Version(id, org.apache.lucene.util.Version.LATEST);
        }
    }

    /**
     * Return the {@link Version} of Elasticsearch that has been used to create an index given its settings.
     *
     * @throws IllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED}
     * @throws IllegalStateException if the given index settings doesn't contain a value for the key
     *                               {@value IndexMetaData#SETTING_VERSION_CREATED}
     */
    public static Version indexCreated(Settings indexSettings) {
        final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
        if (indexVersion == null) {
            throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) + "]");
            throw new IllegalStateException(
                    "[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: ["
                            + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) + "]");
        }
        return indexVersion;
    }

@ -555,19 +159,24 @@ public class Version {
        if (!Strings.hasLength(version)) {
            return Version.CURRENT;
        }
        final boolean snapshot;
        final boolean snapshot; // this is some BWC for 2.x and before indices
        if (snapshot = version.endsWith("-SNAPSHOT")) {
            version = version.substring(0, version.length() - 9);
        }
        String[] parts = version.split("\\.|\\-");
        if (parts.length < 3 || parts.length > 4) {
            throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
            throw new IllegalArgumentException(
                    "the version needs to contain major, minor, and revision, and optionally the build: " + version);
        }

        try {

            final int rawMajor = Integer.parseInt(parts[0]);
            if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
                throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
            }
            final int betaOffset = rawMajor < 5 ? 0 : 25;
            // we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
            final int major = Integer.parseInt(parts[0]) * 1000000;
            final int major = rawMajor * 1000000;
            final int minor = Integer.parseInt(parts[1]) * 10000;
            final int revision = Integer.parseInt(parts[2]) * 100;

@ -575,19 +184,21 @@ public class Version {
            int build = 99;
            if (parts.length == 4) {
                String buildStr = parts[3];
                if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = Integer.parseInt(buildStr.substring(4));
                }
                if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                if (buildStr.startsWith("alpha")) {
                    assert rawMajor >= 5 : "major must be >= 5 but was " + rawMajor;
                    build = Integer.parseInt(buildStr.substring(5));
                    assert build < 25 : "expected an alpha build but " + build + " >= 25";
                } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = betaOffset + Integer.parseInt(buildStr.substring(4));
                    assert build < 50 : "expected a beta build but " + build + " >= 50";
                } else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                    build = Integer.parseInt(buildStr.substring(2)) + 50;
                } else {
                    throw new IllegalArgumentException("unable to parse version " + version);
                }
            }

            final Version versionFromId = fromId(major + minor + revision + build);
            if (snapshot != versionFromId.snapshot()) {
                return new Version(versionFromId.id, snapshot, versionFromId.luceneVersion);
            }
            return versionFromId;
            return fromId(major + minor + revision + build);

        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("unable to parse version " + version, e);
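            // Worked examples of the parsing above (illustrative, cross-checked against the constants in this file):
            //   "2.0.0-beta1"  -> betaOffset 0, build 1,     id 2000001 (V_2_0_0_beta1_ID)
            //   "5.0.0-alpha2" -> build 2,                   id 5000002 (V_5_0_0_alpha2_ID)
            //   "1.0.0.RC2"    -> build 2 + 50 = 52,         id 1000052 (V_1_0_0_RC2_ID)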
@ -599,23 +210,17 @@ public class Version {
    public final byte minor;
    public final byte revision;
    public final byte build;
    public final Boolean snapshot;
    public final org.apache.lucene.util.Version luceneVersion;

    Version(int id, boolean snapshot, org.apache.lucene.util.Version luceneVersion) {
    Version(int id, org.apache.lucene.util.Version luceneVersion) {
        this.id = id;
        this.major = (byte) ((id / 1000000) % 100);
        this.minor = (byte) ((id / 10000) % 100);
        this.revision = (byte) ((id / 100) % 100);
        this.build = (byte) (id % 100);
        this.snapshot = snapshot;
        this.luceneVersion = luceneVersion;
    }

    public boolean snapshot() {
        return snapshot;
    }

    public boolean after(Version version) {
        return version.id < id;
    }
@ -643,19 +248,26 @@ public class Version {
        return Version.smallest(this, fromId(major * 1000000 + 99));
    }

    /**
     * Just the version number (without -SNAPSHOT if snapshot).
     */
    public String number() {
    @SuppressForbidden(reason = "System.out.*")
    public static void main(String[] args) {
        System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: "
                + JvmInfo.jvmInfo().version());
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(major).append('.').append(minor).append('.').append(revision);
        if (isBeta()) {
        if (isAlpha()) {
            sb.append("-alpha");
            sb.append(build);
        } else if (isBeta()) {
            if (major >= 2) {
                sb.append("-beta");
            } else {
                sb.append(".Beta");
            }
            sb.append(build);
            sb.append(major < 5 ? build : build - 25);
        } else if (build < 99) {
            if (major >= 2) {
                sb.append("-rc");

@ -667,21 +279,6 @@ public class Version {
        return sb.toString();
    }

    @SuppressForbidden(reason = "System.out.*")
    public static void main(String[] args) {
        System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: " + JvmInfo.jvmInfo().version());
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(number());
        if (snapshot()) {
            sb.append("-SNAPSHOT");
        }
        return sb.toString();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
@ -706,7 +303,16 @@ public class Version {
    }

    public boolean isBeta() {
        return build < 50;
        return major < 5 ? build < 50 : build >= 25 && build < 50;
    }

    /**
     * Returns true iff this version is an alpha version
     * Note: This has been introduced in elasticsearch version 5. Previous versions will never
     * have an alpha version.
     */
    public boolean isAlpha() {
        return major < 5 ? false : build < 25;
    }

    public boolean isRC() {
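Taken together, the revised predicates and the toString() offset partition the AA byte as follows; this summary is derived from the code above, not stated anywhere in the diff:

    // major >= 5: AA 1-24 alpha, 25-49 beta (rendered as build - 25), 50-98 rc, 99 release
    // major <  5: AA 1-49 beta, 50-98 rc, 99 release (alphas did not exist before 5.0)
    // e.g. fromId(5000026).toString() would yield "5.0.0-beta1"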
@ -19,8 +19,6 @@

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

import java.util.concurrent.Future;
@ -21,18 +21,16 @@ package org.elasticsearch.action;

/**
 * A listener for action responses or failures.
 */
public interface ActionListener<Response> {

    /**
     * A response handler.
     * Handle action response. This response may constitute a failure or a
     * success but it is up to the listener to make that decision.
     */
    void onResponse(Response response);

    /**
     * A failure handler.
     * A failure caused by an exception at some phase of the task.
     */
    void onFailure(Throwable e);
}
@ -24,16 +24,21 @@ import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponse;

import java.util.Objects;
import java.util.function.Supplier;

/**
 * A simple base class for action response listeners, defaulting to using the SAME executor (as it's
 * very common on response handlers).
 */
public abstract class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {
public class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {

    private final ActionListener<Response> listener;
    private final Supplier<Response> responseSupplier;

    public ActionListenerResponseHandler(ActionListener<Response> listener) {
        this.listener = listener;
    public ActionListenerResponseHandler(ActionListener<Response> listener, Supplier<Response> responseSupplier) {
        this.listener = Objects.requireNonNull(listener);
        this.responseSupplier = Objects.requireNonNull(responseSupplier);
    }

    @Override

@ -46,6 +51,11 @@ public abstract class ActionListenerResponseHandler<Response extends TransportRe
        listener.onFailure(e);
    }

    @Override
    public Response newInstance() {
        return responseSupplier.get();
    }

    @Override
    public String executor() {
        return ThreadPool.Names.SAME;
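With the Supplier-based constructor, callers no longer need to subclass the handler just to implement newInstance(); they pass a factory for the concrete response instead. A minimal, hypothetical call site (transportService, node, request, and listener are assumed to be in scope, and the response type is chosen purely for illustration):

    transportService.sendRequest(node, ClusterHealthAction.NAME, request,
            new ActionListenerResponseHandler<>(listener, ClusterHealthResponse::new));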
@ -19,6 +19,8 @@

package org.elasticsearch.action;

import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;

@ -28,6 +30,10 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;

@ -58,6 +64,8 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;

@ -79,7 +87,9 @@ import org.elasticsearch.action.admin.indices.exists.indices.TransportIndicesExi
import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsAction;
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;

@ -121,14 +131,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;
import org.elasticsearch.action.admin.indices.warmer.get.TransportGetWarmersAction;
import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.put.TransportPutWarmerAction;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.TransportBulkAction;
import org.elasticsearch.action.bulk.TransportShardBulkAction;

@ -145,17 +147,28 @@ import org.elasticsearch.action.get.TransportMultiGetAction;
import org.elasticsearch.action.get.TransportShardMultiGetAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.delete.TransportDeleteIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.get.TransportGetIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.put.TransportPutIndexedScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction;
import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
import org.elasticsearch.action.ingest.IngestActionFilter;
import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineTransportAction;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.TransportMainAction;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
import org.elasticsearch.action.percolate.TransportPercolateAction;
import org.elasticsearch.action.percolate.TransportShardMultiPercolateAction;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.MultiSearchAction;
import org.elasticsearch.action.search.SearchAction;

@ -164,14 +177,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction;
import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.search.TransportSearchScrollAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.suggest.TransportSuggestAction;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;

@ -182,7 +187,6 @@ import org.elasticsearch.action.termvectors.TermVectorsAction;
import org.elasticsearch.action.termvectors.TransportMultiTermVectorsAction;
import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction;
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.common.inject.AbstractModule;

@ -202,7 +206,7 @@ public class ActionModule extends AbstractModule {
    private final Map<String, ActionEntry> actions = new HashMap<>();
    private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();

    static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
    static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
        public final GenericAction<Request, Response> action;
        public final Class<? extends TransportAction<Request, Response>> transportAction;
        public final Class[] supportTransportActions;

@ -212,13 +216,13 @@ public class ActionModule extends AbstractModule {
            this.transportAction = transportAction;
            this.supportTransportActions = supportTransportActions;
        }

    }

    private final boolean ingestEnabled;
    private final boolean proxy;

    public ActionModule(boolean proxy) {
    public ActionModule(boolean ingestEnabled, boolean proxy) {
        this.ingestEnabled = ingestEnabled;
        this.proxy = proxy;
    }

@ -231,7 +235,7 @@ public class ActionModule extends AbstractModule {
     * @param <Request> The request type.
     * @param <Response> The response type.
     */
    public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
        actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
    }

@ -242,6 +246,13 @@ public class ActionModule extends AbstractModule {

    @Override
    protected void configure() {
        if (proxy == false) {
            if (ingestEnabled) {
                registerFilter(IngestActionFilter.class);
            } else {
                registerFilter(IngestProxyActionFilter.class);
            }
        }

        Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
        for (Class<? extends ActionFilter> actionFilter : actionFilters) {

@ -250,10 +261,14 @@ public class ActionModule extends AbstractModule {
        bind(ActionFilters.class).asEagerSingleton();
        bind(AutoCreateIndex.class).asEagerSingleton();
        bind(DestructiveOperations.class).asEagerSingleton();
        registerAction(MainAction.INSTANCE, TransportMainAction.class);
        registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
        registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

        registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
        registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);

@ -293,56 +308,49 @@ public class ActionModule extends AbstractModule {
        registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
        registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
        registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
        registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
        registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
        registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
        registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
        registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
        registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
        registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
        registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
        registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
        registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
        registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
        registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);

        registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
        registerAction(GetAction.INSTANCE, TransportGetAction.class);
        registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class,
                TransportDfsOnlyAction.class);
        registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
        registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
                TransportShardMultiTermsVectorAction.class);
        registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);
        registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class);
        registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
        registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
                TransportShardMultiGetAction.class);
        registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
                TransportShardBulkAction.class);
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
                TransportSearchDfsQueryThenFetchAction.class,
                TransportSearchQueryThenFetchAction.class,
                TransportSearchDfsQueryAndFetchAction.class,
                TransportSearchQueryAndFetchAction.class
        );
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
                TransportSearchScrollQueryThenFetchAction.class,
                TransportSearchScrollQueryAndFetchAction.class
        );
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
        registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
        registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
        registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
        registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
        registerAction(RenderSearchTemplateAction.INSTANCE, TransportRenderSearchTemplateAction.class);

        // Indexed scripts
        registerAction(PutIndexedScriptAction.INSTANCE, TransportPutIndexedScriptAction.class);
        registerAction(GetIndexedScriptAction.INSTANCE, TransportGetIndexedScriptAction.class);
        registerAction(DeleteIndexedScriptAction.INSTANCE, TransportDeleteIndexedScriptAction.class);
        registerAction(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
        registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
        registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);

        registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class);

        registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
        registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
        registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
        registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);

        // register Name -> GenericAction Map that can be injected to instances.
        MapBinder<String, GenericAction> actionsBinder
                = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
@ -28,17 +28,13 @@ import java.io.IOException;
/**
 *
 */
public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {

    public ActionRequest() {
        super();
    }

    protected ActionRequest(ActionRequest request) {
        super(request);
        // this does not set the listenerThreaded API, if needed, it's up to the caller to set it
        // since most times, we actually want it to not be threaded...
        //this.listenerThreaded = request.listenerThreaded();
        // this.listenerThreaded = request.listenerThreaded();
    }

    public abstract ActionRequestValidationException validate();
@@ -49,12 +49,6 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
        return this.request;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder putHeader(String key, Object value) {
        request.putHeader(key, value);
        return (RequestBuilder) this;
    }

    public ListenableActionFuture<Response> execute() {
        PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
        execute(future);
@@ -35,7 +35,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable {
    /**
     * Sets the array of aliases that the action relates to
     */
    AliasesRequest aliases(String[] aliases);
    AliasesRequest aliases(String... aliases);

    /**
     * Returns true if wildcards expressions among aliases should be resolved, false otherwise
@@ -0,0 +1,128 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

/**
 * A base class for the response of a write operation that involves a single doc
 */
public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {

    private ShardId shardId;
    private String id;
    private String type;
    private long version;

    public DocWriteResponse(ShardId shardId, String type, String id, long version) {
        this.shardId = shardId;
        this.type = type;
        this.id = id;
        this.version = version;
    }

    // needed for deserialization
    protected DocWriteResponse() {
    }

    /**
     * The index the document was changed in.
     */
    public String getIndex() {
        return this.shardId.getIndexName();
    }

    /**
     * The exact shard the document was changed in.
     */
    public ShardId getShardId() {
        return this.shardId;
    }

    /**
     * The type of the document changed.
     */
    public String getType() {
        return this.type;
    }

    /**
     * The id of the document changed.
     */
    public String getId() {
        return this.id;
    }

    /**
     * Returns the current version of the doc.
     */
    public long getVersion() {
        return this.version;
    }

    /** returns the rest status for this response (based on {@link ShardInfo#status()}) */
    public RestStatus status() {
        return getShardInfo().status();
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardId = ShardId.readShardId(in);
        type = in.readString();
        id = in.readString();
        version = in.readZLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardId.writeTo(out);
        out.writeString(type);
        out.writeString(id);
        out.writeZLong(version);
    }

    static final class Fields {
        static final String _INDEX = "_index";
        static final String _TYPE = "_type";
        static final String _ID = "_id";
        static final String _VERSION = "_version";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        ReplicationResponse.ShardInfo shardInfo = getShardInfo();
        builder.field(Fields._INDEX, shardId.getIndexName())
                .field(Fields._TYPE, type)
                .field(Fields._ID, id)
                .field(Fields._VERSION, version);
        shardInfo.toXContent(builder, params);
        return builder;
    }
}
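For reference, `toXContent` above renders the four `Fields` constants followed by the `_shards` section produced by `ReplicationResponse.ShardInfo`. An illustrative response body (index, id, and version values are made up, and the exact `_shards` fields come from `ShardInfo`, which is outside this hunk):

    {
      "_index": "my-index",
      "_type": "doc",
      "_id": "1",
      "_version": 2,
      "_shards": {
        "total": 2,
        "successful": 2,
        "failed": 0
      }
    }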
@@ -62,4 +62,12 @@ public interface DocumentRequest<T> extends IndicesRequest {
     * @return the Routing
     */
    String routing();

    /**
     * Get the parent for this request
     * @return the Parent
     */
    String parent();

}
@@ -41,9 +41,9 @@ public interface IndicesRequest {
    IndicesOptions indicesOptions();

    static interface Replaceable extends IndicesRequest {
        /*
         * Sets the array of indices that the action relates to
        /**
         * Sets the indices that the action relates to.
         */
        IndicesRequest indices(String[] indices);
        IndicesRequest indices(String... indices);
    }
}
@@ -19,7 +19,6 @@

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;
@@ -26,10 +26,8 @@ package org.elasticsearch.action;
public interface RealtimeRequest {

    /**
     * @param realtime Controls whether this request should be realtime by reading from the translog. If <code>null</code>
     *                 is specified then whether the operation will be realtime depends on the api of the concrete request
     *                 subclass.
     * @param realtime Controls whether this request should be realtime by reading from the translog.
     */
    <R extends RealtimeRequest> R realtime(Boolean realtime);
    <R extends RealtimeRequest> R realtime(boolean realtime);

}
@@ -21,7 +21,6 @@ package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -29,26 +28,24 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Collections;

/**
 * Base class for write action responses.
 */
public class ActionWriteResponse extends ActionResponse {
public class ReplicationResponse extends ActionResponse {

    public final static ActionWriteResponse.ShardInfo.Failure[] EMPTY = new ActionWriteResponse.ShardInfo.Failure[0];
    public final static ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0];

    private ShardInfo shardInfo;

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardInfo = ActionWriteResponse.ShardInfo.readShardInfo(in);
        shardInfo = ReplicationResponse.ShardInfo.readShardInfo(in);
    }

    @Override
@@ -172,15 +169,13 @@ public class ActionWriteResponse extends ActionResponse {

    public static class Failure implements ShardOperationFailedException, ToXContent {

        private String index;
        private int shardId;
        private ShardId shardId;
        private String nodeId;
        private Throwable cause;
        private RestStatus status;
        private boolean primary;

        public Failure(String index, int shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
            this.index = index;
        public Failure(ShardId shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
            this.shardId = shardId;
            this.nodeId = nodeId;
            this.cause = cause;
@@ -196,7 +191,7 @@ public class ActionWriteResponse extends ActionResponse {
         */
        @Override
        public String index() {
            return index;
            return shardId.getIndexName();
        }

        /**
@@ -204,6 +199,10 @@ public class ActionWriteResponse extends ActionResponse {
         */
        @Override
        public int shardId() {
            return shardId.id();
        }

        public ShardId fullShardId() {
            return shardId;
        }

@@ -246,8 +245,7 @@ public class ActionWriteResponse extends ActionResponse {

        @Override
        public void readFrom(StreamInput in) throws IOException {
            index = in.readString();
            shardId = in.readVInt();
            shardId = ShardId.readShardId(in);
            nodeId = in.readOptionalString();
            cause = in.readThrowable();
            status = RestStatus.readFrom(in);
@@ -256,8 +254,7 @@ public class ActionWriteResponse extends ActionResponse {

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(index);
            out.writeVInt(shardId);
            shardId.writeTo(out);
            out.writeOptionalString(nodeId);
            out.writeThrowable(cause);
            RestStatus.writeTo(out, status);
@@ -267,8 +264,8 @@ public class ActionWriteResponse extends ActionResponse {
        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(Fields._INDEX, index);
            builder.field(Fields._SHARD, shardId);
            builder.field(Fields._INDEX, shardId.getIndexName());
            builder.field(Fields._SHARD, shardId.id());
            builder.field(Fields._NODE, nodeId);
            builder.field(Fields.REASON);
            builder.startObject();
@@ -282,24 +279,24 @@ public class ActionWriteResponse extends ActionResponse {

        private static class Fields {

            private static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
            private static final XContentBuilderString _SHARD = new XContentBuilderString("_shard");
            private static final XContentBuilderString _NODE = new XContentBuilderString("_node");
            private static final XContentBuilderString REASON = new XContentBuilderString("reason");
            private static final XContentBuilderString STATUS = new XContentBuilderString("status");
            private static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
            private static final String _INDEX = "_index";
            private static final String _SHARD = "_shard";
            private static final String _NODE = "_node";
            private static final String REASON = "reason";
            private static final String STATUS = "status";
            private static final String PRIMARY = "primary";

        }
    }

    private static class Fields {

        private static final XContentBuilderString _SHARDS = new XContentBuilderString("_shards");
        private static final XContentBuilderString TOTAL = new XContentBuilderString("total");
        private static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
        private static final XContentBuilderString PENDING = new XContentBuilderString("pending");
        private static final XContentBuilderString FAILED = new XContentBuilderString("failed");
        private static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
        private static final String _SHARDS = "_shards";
        private static final String TOTAL = "total";
        private static final String SUCCESSFUL = "successful";
        private static final String PENDING = "pending";
        private static final String FAILED = "failed";
        private static final String FAILURES = "failures";

    }
}
@@ -0,0 +1,115 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.ExceptionsHelper.detailedMessage;

/**
 * Information about task operation failures
 *
 * The class is final due to serialization limitations
 */
public final class TaskOperationFailure implements Writeable, ToXContent {

    private final String nodeId;

    private final long taskId;

    private final Throwable reason;

    private final RestStatus status;

    public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
        this.nodeId = nodeId;
        this.taskId = taskId;
        this.reason = t;
        status = ExceptionsHelper.status(t);
    }

    /**
     * Read from a stream.
     */
    public TaskOperationFailure(StreamInput in) throws IOException {
        nodeId = in.readString();
        taskId = in.readLong();
        reason = in.readThrowable();
        status = RestStatus.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeId);
        out.writeLong(taskId);
        out.writeThrowable(reason);
        RestStatus.writeTo(out, status);
    }

    public String getNodeId() {
        return this.nodeId;
    }

    public long getTaskId() {
        return this.taskId;
    }

    public String getReason() {
        return detailedMessage(reason);
    }

    public RestStatus getStatus() {
        return status;
    }

    public Throwable getCause() {
        return reason;
    }

    @Override
    public String toString() {
        return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("task_id", getTaskId());
        builder.field("node_id", getNodeId());
        builder.field("status", status.name());
        if (reason != null) {
            builder.field("reason");
            builder.startObject();
            ElasticsearchException.toXContent(builder, params, reason);
            builder.endObject();
        }
        return builder;
    }
}
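The `TaskOperationFailure(StreamInput in)` constructor and `writeTo` above follow the `Writeable` convention: fields are read back in exactly the order they were written. A self-contained sketch (not part of this commit) of that round-trip symmetry using plain `java.io` streams; the `Failure` class here is a stand-in, not the Elasticsearch type:

    import java.io.*;

    final class Failure {
        final String nodeId;
        final long taskId;

        Failure(String nodeId, long taskId) {
            this.nodeId = nodeId;
            this.taskId = taskId;
        }

        // Fields must be read in exactly the order writeTo wrote them.
        Failure(DataInput in) throws IOException {
            this.nodeId = in.readUTF();
            this.taskId = in.readLong();
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(nodeId);
            out.writeLong(taskId);
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            new Failure("node-1", 42L).writeTo(new DataOutputStream(bytes));
            Failure copy = new Failure(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(copy.nodeId + "/" + copy.taskId); // prints node-1/42
        }
    }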
@@ -23,7 +23,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

/**
 * A generic proxy that will execute the given action against a specific node.

@@ -48,11 +49,7 @@ public class TransportActionNodeProxy<Request extends ActionRequest, Response ex
            listener.onFailure(validationException);
            return;
        }
        transportService.sendRequest(node, action.name(), request, transportOptions, new ActionListenerResponseHandler<Response>(listener) {
            @Override
            public Response newInstance() {
                return action.newResponse();
            }
        });
        transportService.sendRequest(node, action.name(), request, transportOptions,
                new ActionListenerResponseHandler<>(listener, action::newResponse));
    }
}
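The second hunk above replaces an anonymous `ActionListenerResponseHandler` subclass that overrides `newInstance()` with a constructor that takes the response factory as a method reference (`action::newResponse`). A minimal sketch (not part of this commit) of the same refactoring pattern using `java.util.function.Supplier`; the `ResponseHandler` type here is a stand-in:

    import java.util.function.Supplier;

    final class ResponseHandler<T> {
        private final Supplier<T> responseFactory;

        ResponseHandler(Supplier<T> responseFactory) {
            this.responseFactory = responseFactory;
        }

        // Was previously an overridable method; now just delegates to the supplier.
        T newInstance() {
            return responseFactory.get();
        }

        public static void main(String[] args) {
            // StringBuilder::new plays the role of action::newResponse.
            ResponseHandler<StringBuilder> handler = new ResponseHandler<>(StringBuilder::new);
            System.out.println(handler.newInstance().append("ok"));
        }
    }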
@@ -21,7 +21,6 @@ package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

@@ -37,13 +36,19 @@ public class UnavailableShardsException extends ElasticsearchException {
        super(buildMessage(shardId, message), args);
    }

    public UnavailableShardsException(String index, int shardId, String message, Object... args) {
        super(buildMessage(index, shardId, message), args);
    }

    private static String buildMessage(ShardId shardId, String message) {
        if (shardId == null) {
            return message;
        }
        return "[" + shardId.index().name() + "][" + shardId.id() + "] " + message;
        return buildMessage(shardId.getIndexName(), shardId.id(), message);
    }

    private static String buildMessage(String index, int shardId, String message) {
        return "[" + index + "][" + shardId + "] " + message;
    }

    public UnavailableShardsException(StreamInput in) throws IOException {
        super(in);
    }

@@ -52,4 +57,4 @@ public class UnavailableShardsException extends ElasticsearchException {
    public RestStatus status() {
        return RestStatus.SERVICE_UNAVAILABLE;
    }
}
}
@@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Action for explaining shard allocation for a shard in the cluster
 */
public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainRequest,
        ClusterAllocationExplainResponse,
        ClusterAllocationExplainRequestBuilder> {

    public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction();
    public static final String NAME = "cluster:monitor/allocation/explain";

    private ClusterAllocationExplainAction() {
        super(NAME);
    }

    @Override
    public ClusterAllocationExplainResponse newResponse() {
        return new ClusterAllocationExplainResponse();
    }

    @Override
    public ClusterAllocationExplainRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ClusterAllocationExplainRequestBuilder(client, this);
    }
}
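Presumably this action is wired into `ActionModule` with the same `registerAction` pattern shown at the top of this diff, along the lines of the following (illustrative only, not part of this hunk):

    // hypothetical wiring inside ActionModule.configure(), mirroring the pattern above
    registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);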
@@ -0,0 +1,177 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
 * A request to explain the allocation of a shard in the cluster
 */
public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAllocationExplainRequest> {

    private static ObjectParser<ClusterAllocationExplainRequest, ParseFieldMatcherSupplier> PARSER = new ObjectParser(
            "cluster/allocation/explain");
    static {
        PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index"));
        PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard"));
        PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary"));
    }

    private String index;
    private Integer shard;
    private Boolean primary;
    private boolean includeYesDecisions = false;

    /** Explain the first unassigned shard */
    public ClusterAllocationExplainRequest() {
        this.index = null;
        this.shard = null;
        this.primary = null;
    }

    /**
     * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
     * will be picked for explanation. If no replicas are unassigned, the first assigned replica will
     * be explained.
     */
    public ClusterAllocationExplainRequest(String index, int shard, boolean primary) {
        this.index = index;
        this.shard = shard;
        this.primary = primary;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (this.useAnyUnassignedShard() == false) {
            if (this.index == null) {
                validationException = addValidationError("index must be specified", validationException);
            }
            if (this.shard == null) {
                validationException = addValidationError("shard must be specified", validationException);
            }
            if (this.primary == null) {
                validationException = addValidationError("primary must be specified", validationException);
            }
        }
        return validationException;
    }

    /**
     * Returns {@code true} iff the first unassigned shard is to be used
     */
    public boolean useAnyUnassignedShard() {
        return this.index == null && this.shard == null && this.primary == null;
    }

    public ClusterAllocationExplainRequest setIndex(String index) {
        this.index = index;
        return this;
    }

    @Nullable
    public String getIndex() {
        return this.index;
    }

    public ClusterAllocationExplainRequest setShard(Integer shard) {
        this.shard = shard;
        return this;
    }

    @Nullable
    public Integer getShard() {
        return this.shard;
    }

    public ClusterAllocationExplainRequest setPrimary(Boolean primary) {
        this.primary = primary;
        return this;
    }

    @Nullable
    public Boolean isPrimary() {
        return this.primary;
    }

    public void includeYesDecisions(boolean includeYesDecisions) {
        this.includeYesDecisions = includeYesDecisions;
    }

    /** Returns true if all decisions should be included. Otherwise only "NO" and "THROTTLE" decisions are returned */
    public boolean includeYesDecisions() {
        return this.includeYesDecisions;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");
        if (this.useAnyUnassignedShard()) {
            sb.append("useAnyUnassignedShard=true");
        } else {
            sb.append("index=").append(index);
            sb.append(",shard=").append(shard);
            sb.append(",primary?=").append(primary);
        }
        sb.append(",includeYesDecisions?=").append(includeYesDecisions);
        return sb.toString();
    }

    public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException {
        ClusterAllocationExplainRequest req = PARSER.parse(parser, new ClusterAllocationExplainRequest(), () -> ParseFieldMatcher.STRICT);
        Exception e = req.validate();
        if (e != null) {
            throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request", e);
        }
        return req;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.index = in.readOptionalString();
        this.shard = in.readOptionalVInt();
        this.primary = in.readOptionalBoolean();
        this.includeYesDecisions = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalString(index);
        out.writeOptionalVInt(shard);
        out.writeOptionalBoolean(primary);
        out.writeBoolean(includeYesDecisions);
    }
}
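Given the three `PARSER.declare*` calls above, a request body that `parse(XContentParser)` accepts would look like the following (values illustrative). Note that `validate()` requires either all three fields or none of them; leaving all three out means "explain the first unassigned shard":

    {
      "index": "my-index",
      "shard": 0,
      "primary": true
    }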
@@ -0,0 +1,66 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for requests to explain the allocation of a shard in the cluster
 */
public class ClusterAllocationExplainRequestBuilder
        extends MasterNodeOperationRequestBuilder<ClusterAllocationExplainRequest,
        ClusterAllocationExplainResponse,
        ClusterAllocationExplainRequestBuilder> {

    public ClusterAllocationExplainRequestBuilder(ElasticsearchClient client, ClusterAllocationExplainAction action) {
        super(client, action, new ClusterAllocationExplainRequest());
    }

    /** The index name to use when finding the shard to explain */
    public ClusterAllocationExplainRequestBuilder setIndex(String index) {
        request.setIndex(index);
        return this;
    }

    /** The shard number to use when finding the shard to explain */
    public ClusterAllocationExplainRequestBuilder setShard(int shard) {
        request.setShard(shard);
        return this;
    }

    /** Whether the primary or replica should be explained */
    public ClusterAllocationExplainRequestBuilder setPrimary(boolean primary) {
        request.setPrimary(primary);
        return this;
    }

    /**
     * Signal that the first unassigned shard should be used
     */
    public ClusterAllocationExplainRequestBuilder useAnyUnassignedShard() {
        request.setIndex(null);
        request.setShard(null);
        request.setPrimary(null);
        return this;
    }

}
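An illustrative use of the builder (not part of this commit), assuming an `ElasticsearchClient` instance named `client`; `execute()` comes from the `ActionRequestBuilder` base class shown earlier in this diff, and the index name is made up:

    ClusterAllocationExplainResponse response =
            new ClusterAllocationExplainRequestBuilder(client, ClusterAllocationExplainAction.INSTANCE)
                    .setIndex("my-index")   // illustrative index name
                    .setShard(0)
                    .setPrimary(true)
                    .execute()
                    .actionGet();
    ClusterAllocationExplanation explanation = response.getExplanation();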
@@ -17,46 +17,45 @@
 * under the License.
 */

package org.elasticsearch.action.termvectors.dfs;
package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.dfs.DfsSearchResult;

import java.io.IOException;

/**
 *
 * Explanation response for a shard in the cluster
 */
class ShardDfsOnlyResponse extends BroadcastShardResponse {
public class ClusterAllocationExplainResponse extends ActionResponse {

    private DfsSearchResult dfsSearchResult = new DfsSearchResult();

    ShardDfsOnlyResponse() {
    private ClusterAllocationExplanation cae;

    public ClusterAllocationExplainResponse() {
    }

    ShardDfsOnlyResponse(ShardId shardId, DfsSearchResult dfsSearchResult) {
        super(shardId);
        this.dfsSearchResult = dfsSearchResult;
    public ClusterAllocationExplainResponse(ClusterAllocationExplanation cae) {
        this.cae = cae;
    }

    public DfsSearchResult getDfsSearchResult() {
        return dfsSearchResult;
    /**
     * Return the explanation for shard allocation in the cluster
     */
    public ClusterAllocationExplanation getExplanation() {
        return this.cae;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        dfsSearchResult.readFrom(in);
        this.cae = new ClusterAllocationExplanation(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        dfsSearchResult.writeTo(out);
        cae.writeTo(out);
    }

}
@@ -0,0 +1,259 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * A {@code ClusterAllocationExplanation} is an explanation of why a shard may or may not be allocated to nodes. It also includes weights
 * for where the shard is likely to be assigned. It is an immutable class
 */
public final class ClusterAllocationExplanation implements ToXContent, Writeable {

    private final ShardId shard;
    private final boolean primary;
    private final String assignedNodeId;
    private final UnassignedInfo unassignedInfo;
    private final long remainingDelayMillis;
    private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;

    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
                                        @Nullable UnassignedInfo unassignedInfo, Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
        this.shard = shard;
        this.primary = primary;
        this.assignedNodeId = assignedNodeId;
        this.unassignedInfo = unassignedInfo;
        this.remainingDelayMillis = remainingDelayMillis;
        this.nodeExplanations = nodeExplanations;
    }

    public ClusterAllocationExplanation(StreamInput in) throws IOException {
        this.shard = ShardId.readShardId(in);
        this.primary = in.readBoolean();
        this.assignedNodeId = in.readOptionalString();
        this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
        this.remainingDelayMillis = in.readVLong();

        int mapSize = in.readVInt();
        Map<DiscoveryNode, NodeExplanation> nodeToExplanation = new HashMap<>(mapSize);
        for (int i = 0; i < mapSize; i++) {
            NodeExplanation nodeExplanation = new NodeExplanation(in);
            nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
        }
        this.nodeExplanations = nodeToExplanation;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        this.getShard().writeTo(out);
        out.writeBoolean(this.isPrimary());
        out.writeOptionalString(this.getAssignedNodeId());
        out.writeOptionalWriteable(this.getUnassignedInfo());
        out.writeVLong(remainingDelayMillis);

        out.writeVInt(this.nodeExplanations.size());
        for (NodeExplanation explanation : this.nodeExplanations.values()) {
            explanation.writeTo(out);
        }
    }

    /** Return the shard that the explanation is about */
    public ShardId getShard() {
        return this.shard;
    }

    /** Return true if the explained shard is primary, false otherwise */
    public boolean isPrimary() {
        return this.primary;
    }

    /** Return true if the shard is assigned to a node */
    public boolean isAssigned() {
        return this.assignedNodeId != null;
    }

    /** Return the assigned node id or null if not assigned */
    @Nullable
    public String getAssignedNodeId() {
        return this.assignedNodeId;
    }

    /** Return the unassigned info for the shard or null if the shard is assigned */
    @Nullable
    public UnassignedInfo getUnassignedInfo() {
        return this.unassignedInfo;
    }

    /** Return the remaining allocation delay for this shard in milliseconds */
    public long getRemainingDelayMillis() {
        return this.remainingDelayMillis;
    }

    /** Return a map of node to the explanation for that node */
    public Map<DiscoveryNode, NodeExplanation> getNodeExplanations() {
        return this.nodeExplanations;
    }

    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(); {
            builder.startObject("shard"); {
                builder.field("index", shard.getIndexName());
                builder.field("index_uuid", shard.getIndex().getUUID());
                builder.field("id", shard.getId());
                builder.field("primary", primary);
            }
            builder.endObject(); // end shard
            builder.field("assigned", this.assignedNodeId != null);
            // If assigned, show the node id of the node it's assigned to
            if (assignedNodeId != null) {
                builder.field("assigned_node_id", this.assignedNodeId);
            }
            // If we have unassigned info, show that
            if (unassignedInfo != null) {
                unassignedInfo.toXContent(builder, params);
                long delay = unassignedInfo.getLastComputedLeftDelayNanos();
                builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
                builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
            }
            builder.startObject("nodes");
            for (NodeExplanation explanation : nodeExplanations.values()) {
                explanation.toXContent(builder, params);
            }
            builder.endObject(); // end nodes
        }
        builder.endObject(); // end wrapping object
        return builder;
    }

    /** An Enum representing the final decision for a shard allocation on a node */
    public enum FinalDecision {
        // Yes, the shard can be assigned
        YES((byte) 0),
        // No, the shard cannot be assigned
        NO((byte) 1),
        // The shard is already assigned to this node
        ALREADY_ASSIGNED((byte) 2);

        private final byte id;

        FinalDecision(byte id) {
            this.id = id;
        }

        private static FinalDecision fromId(byte id) {
            switch (id) {
                case 0: return YES;
                case 1: return NO;
                case 2: return ALREADY_ASSIGNED;
                default:
                    throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
            }
        }

        @Override
        public String toString() {
            switch (id) {
                case 0: return "YES";
                case 1: return "NO";
                case 2: return "ALREADY_ASSIGNED";
                default:
                    throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
            }
        }

        static FinalDecision readFrom(StreamInput in) throws IOException {
            return fromId(in.readByte());
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeByte(id);
        }
    }

    /** An Enum representing the state of the shard store's copy of the data on a node */
    public enum StoreCopy {
        // No data for this shard is on the node
        NONE((byte) 0),
        // A copy of the data is available on this node
        AVAILABLE((byte) 1),
        // The copy of the data on the node is corrupt
        CORRUPT((byte) 2),
        // There was an error reading this node's copy of the data
        IO_ERROR((byte) 3),
        // The copy of the data on the node is stale
        STALE((byte) 4),
        // It's unknown what the copy of the data is
        UNKNOWN((byte) 5);

        private final byte id;

        StoreCopy(byte id) {
            this.id = id;
        }

        private static StoreCopy fromId(byte id) {
            switch (id) {
                case 0: return NONE;
                case 1: return AVAILABLE;
                case 2: return CORRUPT;
                case 3: return IO_ERROR;
                case 4: return STALE;
                case 5: return UNKNOWN;
                default:
                    throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
            }
        }

        @Override
        public String toString() {
            switch (id) {
                case 0: return "NONE";
                case 1: return "AVAILABLE";
                case 2: return "CORRUPT";
                case 3: return "IO_ERROR";
                case 4: return "STALE";
                case 5: return "UNKNOWN";
                default:
                    throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
            }
        }

        static StoreCopy readFrom(StreamInput in) throws IOException {
            return fromId(in.readByte());
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeByte(id);
        }
    }
}
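Putting the `toXContent` above together, the explanation for an unassigned primary would render roughly as follows (uuid and values illustrative; the `unassigned_info` body is produced by `UnassignedInfo.toXContent` and the per-node bodies by `NodeExplanation.toXContent`, so they are elided here):

    {
      "shard": {
        "index": "my-index",
        "index_uuid": "...",
        "id": 0,
        "primary": true
      },
      "assigned": false,
      ... unassigned_info and delay fields, when present ...
      "nodes": {
        "node-uuid": { ... per-node explanation ... }
      }
    }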
@@ -0,0 +1,145 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Map;

/** The cluster allocation explanation for a single node */
public class NodeExplanation implements Writeable, ToXContent {
    private final DiscoveryNode node;
    private final Decision nodeDecision;
    private final Float nodeWeight;
    private final IndicesShardStoresResponse.StoreStatus storeStatus;
    private final ClusterAllocationExplanation.FinalDecision finalDecision;
    private final ClusterAllocationExplanation.StoreCopy storeCopy;
    private final String finalExplanation;

    public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
                           final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
                           final ClusterAllocationExplanation.FinalDecision finalDecision,
                           final String finalExplanation,
                           final ClusterAllocationExplanation.StoreCopy storeCopy) {
        this.node = node;
        this.nodeDecision = nodeDecision;
        this.nodeWeight = nodeWeight;
        this.storeStatus = storeStatus;
        this.finalDecision = finalDecision;
        this.finalExplanation = finalExplanation;
        this.storeCopy = storeCopy;
    }

    public NodeExplanation(StreamInput in) throws IOException {
        this.node = new DiscoveryNode(in);
        this.nodeDecision = Decision.readFrom(in);
        this.nodeWeight = in.readFloat();
        if (in.readBoolean()) {
            this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in);
        } else {
            this.storeStatus = null;
        }
        this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in);
        this.finalExplanation = in.readString();
        this.storeCopy = ClusterAllocationExplanation.StoreCopy.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        node.writeTo(out);
        Decision.writeTo(nodeDecision, out);
        out.writeFloat(nodeWeight);
        if (storeStatus == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            storeStatus.writeTo(out);
        }
        finalDecision.writeTo(out);
        out.writeString(finalExplanation);
        storeCopy.writeTo(out);
    }

    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(node.getId()); {
            builder.field("node_name", node.getName());
            builder.startObject("node_attributes"); {
                for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
                    builder.field(attrEntry.getKey(), attrEntry.getValue());
                }
            }
            builder.endObject(); // end attributes
            builder.startObject("store"); {
                builder.field("shard_copy", storeCopy.toString());
                if (storeStatus != null) {
                    final Throwable storeErr = storeStatus.getStoreException();
                    if (storeErr != null) {
                        builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr));
                    }
                }
            }
            builder.endObject(); // end store
            builder.field("final_decision", finalDecision.toString());
            builder.field("final_explanation", finalExplanation.toString());
            builder.field("weight", nodeWeight);
            nodeDecision.toXContent(builder, params);
        }
        builder.endObject(); // end node <uuid>
        return builder;
    }

    public DiscoveryNode getNode() {
        return this.node;
    }

    public Decision getDecision() {
        return this.nodeDecision;
    }

    public Float getWeight() {
        return this.nodeWeight;
    }

    @Nullable
    public IndicesShardStoresResponse.StoreStatus getStoreStatus() {
        return this.storeStatus;
    }

    public ClusterAllocationExplanation.FinalDecision getFinalDecision() {
        return this.finalDecision;
    }

    public String getFinalExplanation() {
        return this.finalExplanation;
    }

    public ClusterAllocationExplanation.StoreCopy getStoreCopy() {
        return this.storeCopy;
    }
}
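Each per-node entry produced by `NodeExplanation.toXContent` above would render roughly like this (node id, name, and weight illustrative; the trailing decision fields come from `Decision.toXContent`, which is outside this diff, and the final_explanation string is one of those built in the transport action below):

    "node-uuid": {
      "node_name": "node-1",
      "node_attributes": { },
      "store": {
        "shard_copy": "NONE"
      },
      "final_decision": "NO",
      "final_explanation": "there is no copy of the shard available",
      "weight": 1.5,
      ... decision output from Decision.toXContent ...
    }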
@ -0,0 +1,326 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData.Custom;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
|
||||
* master node in the cluster.
|
||||
*/
|
||||
public class TransportClusterAllocationExplainAction
|
||||
extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {
|
||||
|
||||
private final AllocationService allocationService;
|
||||
private final ClusterInfoService clusterInfoService;
|
||||
private final AllocationDeciders allocationDeciders;
|
||||
private final ShardsAllocator shardAllocator;
|
||||
private final TransportIndicesShardStoresAction shardStoresAction;
|
||||
|
||||
@Inject
|
||||
public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
ThreadPool threadPool, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AllocationService allocationService, ClusterInfoService clusterInfoService,
|
||||
AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
|
||||
TransportIndicesShardStoresAction shardStoresAction) {
|
||||
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
|
||||
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
|
||||
this.allocationService = allocationService;
|
||||
this.clusterInfoService = clusterInfoService;
|
||||
this.allocationDeciders = allocationDeciders;
|
||||
this.shardAllocator = shardAllocator;
|
||||
this.shardStoresAction = shardStoresAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String executor() {
|
||||
return ThreadPool.Names.MANAGEMENT;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest request, ClusterState state) {
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterAllocationExplainResponse newResponse() {
|
||||
return new ClusterAllocationExplainResponse();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true,
|
||||
* only non-YES (NO and THROTTLE) decisions are returned.
|
||||
*/
|
||||
public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) {
|
||||
Decision d = allocation.deciders().canAllocate(shard, node, allocation);
|
||||
if (includeYesDecisions) {
|
||||
return d;
|
||||
} else {
|
||||
Decision.Multi nonYesDecisions = new Decision.Multi();
|
||||
List<Decision> decisions = d.getDecisions();
|
||||
for (Decision decision : decisions) {
|
||||
if (decision.type() != Decision.Type.YES) {
|
||||
nonYesDecisions.add(decision);
|
||||
}
|
||||
}
|
||||
return nonYesDecisions;
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Construct a {@code NodeExplanation} object for the given shard given all the metadata. This also attempts to construct the human
     * readable FinalDecision and final explanation as part of the explanation.
     */
    public static NodeExplanation calculateNodeExplanation(ShardRouting shard,
                                                           IndexMetaData indexMetaData,
                                                           DiscoveryNode node,
                                                           Decision nodeDecision,
                                                           Float nodeWeight,
                                                           IndicesShardStoresResponse.StoreStatus storeStatus,
                                                           String assignedNodeId,
                                                           Set<String> activeAllocationIds) {
        final ClusterAllocationExplanation.FinalDecision finalDecision;
        final ClusterAllocationExplanation.StoreCopy storeCopy;
        final String finalExplanation;

        if (storeStatus == null) {
            // No copies of the data
            storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
        } else {
            final Throwable storeErr = storeStatus.getStoreException();
            if (storeErr != null) {
                if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
                    storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
                } else {
                    storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR;
                }
            } else if (activeAllocationIds.isEmpty()) {
                // The ids are only empty if dealing with a legacy index
                // TODO: fetch the shard state versions and display here?
                storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN;
            } else if (activeAllocationIds.contains(storeStatus.getAllocationId())) {
                storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE;
            } else {
                // Otherwise, this is a stale copy of the data (allocation ids don't match)
                storeCopy = ClusterAllocationExplanation.StoreCopy.STALE;
            }
        }

        if (node.getId().equals(assignedNodeId)) {
            finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
            finalExplanation = "the shard is already assigned to this node";
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
            finalExplanation = "the copy of the shard is stale, allocation ids do not match";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
            finalExplanation = "there is no copy of the shard available";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
            finalExplanation = "the copy of the shard is corrupt";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
            finalExplanation = "the copy of the shard cannot be read";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else {
            if (nodeDecision.type() == Decision.Type.NO) {
                finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
                finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
            } else {
                finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
                if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
                    finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
                } else {
                    finalExplanation = "the shard can be assigned";
                }
            }
        }
        return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy);
    }
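The store-copy classification above is a strict precedence ladder: a missing status wins over an exception, and an exception wins over the allocation-id checks. A hedged, self-contained restatement with stand-in types, where the boolean flag stands in for the CorruptIndexException unwrap:

import java.util.Set;

class StoreCopySketch {
    enum StoreCopy { NONE, CORRUPT, IO_ERROR, UNKNOWN, AVAILABLE, STALE }

    // Mirrors the ladder above: no status -> NONE; a store exception -> CORRUPT or
    // IO_ERROR; no active allocation ids (legacy index) -> UNKNOWN; a matching id ->
    // AVAILABLE; anything else -> STALE.
    static StoreCopy classify(boolean hasStoreStatus, Throwable storeException, boolean isCorruptIndexException,
                              Set<String> activeAllocationIds, String allocationId) {
        if (!hasStoreStatus) {
            return StoreCopy.NONE;
        }
        if (storeException != null) {
            return isCorruptIndexException ? StoreCopy.CORRUPT : StoreCopy.IO_ERROR;
        }
        if (activeAllocationIds.isEmpty()) {
            return StoreCopy.UNKNOWN;
        }
        return activeAllocationIds.contains(allocationId) ? StoreCopy.AVAILABLE : StoreCopy.STALE;
    }
}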
    /**
     * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
     * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
     */
    public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
                                                            boolean includeYesDecisions, ShardsAllocator shardAllocator,
                                                            List<IndicesShardStoresResponse.StoreStatus> shardStores) {
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // get the existing unassigned info if available
        UnassignedInfo ui = shard.unassignedInfo();

        RoutingNodesIterator iter = routingNodes.nodes();
        Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
        while (iter.hasNext()) {
            RoutingNode node = iter.next();
            DiscoveryNode discoNode = node.node();
            if (discoNode.isDataNode()) {
                Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
                nodeToDecision.put(discoNode, d);
            }
        }
        long remainingDelayMillis = 0;
        final MetaData metadata = allocation.metaData();
        final IndexMetaData indexMetaData = metadata.index(shard.index());
        if (ui != null) {
            final Settings indexSettings = indexMetaData.getSettings();
            long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
            remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
        }

        // Calculate weights for each of the nodes
        Map<DiscoveryNode, Float> weights = shardAllocator.weighShard(allocation, shard);

        Map<DiscoveryNode, IndicesShardStoresResponse.StoreStatus> nodeToStatus = new HashMap<>(shardStores.size());
        for (IndicesShardStoresResponse.StoreStatus status : shardStores) {
            nodeToStatus.put(status.getNode(), status);
        }

        Map<DiscoveryNode, NodeExplanation> explanations = new HashMap<>(shardStores.size());
        for (Map.Entry<DiscoveryNode, Decision> entry : nodeToDecision.entrySet()) {
            DiscoveryNode node = entry.getKey();
            Decision decision = entry.getValue();
            Float weight = weights.get(node);
            IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
            NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
                    storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
            explanations.put(node, nodeExplanation);
        }
        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
                shard.currentNodeId(), remainingDelayMillis, ui, explanations);
    }
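One small detail in explainShard worth noting: UnassignedInfo tracks the remaining allocation delay in nanoseconds, while the response reports milliseconds. A tiny sketch of that conversion, using java.util.concurrent.TimeUnit as a stand-in for TimeValue:

import java.util.concurrent.TimeUnit;

class RemainingDelaySketch {
    // Given the remaining delay in nanos, report it the way the response does: in millis.
    static long remainingDelayMillis(long remainingDelayNanos) {
        return TimeUnit.NANOSECONDS.toMillis(remainingDelayNanos);
    }

    public static void main(String[] args) {
        System.out.println(remainingDelayMillis(1_500_000_000L)); // 1500
    }
}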
    @Override
    protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                   final ActionListener<ClusterAllocationExplainResponse> listener) {
        final RoutingNodes routingNodes = state.getRoutingNodes();
        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
                clusterInfoService.getClusterInfo(), System.nanoTime());

        ShardRouting foundShard = null;
        if (request.useAnyUnassignedShard()) {
            // If we can use any shard, just pick the first unassigned one (if there are any)
            RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
            if (ui.hasNext()) {
                foundShard = ui.next();
            }
        } else {
            String index = request.getIndex();
            int shard = request.getShard();
            if (request.isPrimary()) {
                // If we're looking for the primary shard, there's only one copy, so pick it directly
                foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
            } else {
                // If looking for a replica, go through all the replica shards
                List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
                if (replicaShardRoutings.size() > 0) {
                    // Pick the first replica at the very least
                    foundShard = replicaShardRoutings.get(0);
                    // In case there are multiple replicas where some are assigned and some aren't,
                    // try to find one that is unassigned at least
                    for (ShardRouting replica : replicaShardRoutings) {
                        if (replica.unassigned()) {
                            foundShard = replica;
                            break;
                        }
                    }
                }
            }
        }

        if (foundShard == null) {
            listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
            return;
        }
        final ShardRouting shardRouting = foundShard;
        logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);

        getShardStores(shardRouting, new ActionListener<IndicesShardStoresResponse>() {
            @Override
            public void onResponse(IndicesShardStoresResponse shardStoreResponse) {
                ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses =
                        shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
                List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
                ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
                        request.includeYesDecisions(), shardAllocator, shardStoreStatus);
                listener.onResponse(new ClusterAllocationExplainResponse(cae));
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        });
    }
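The replica branch above takes the first replica as a fallback but prefers an unassigned one, since an unassigned copy is usually what the caller wants explained. The same selection as a self-contained sketch with a stand-in routing type:

import java.util.List;

class ReplicaPickSketch {
    static final class Routing {
        final String id;
        final boolean unassigned;
        Routing(String id, boolean unassigned) {
            this.id = id;
            this.unassigned = unassigned;
        }
    }

    // Prefer an unassigned replica; otherwise fall back to the first one,
    // mirroring the selection loop in masterOperation above.
    static Routing pickReplica(List<Routing> replicas) {
        if (replicas.isEmpty()) {
            return null;
        }
        for (Routing replica : replicas) {
            if (replica.unassigned) {
                return replica;
            }
        }
        return replicas.get(0);
    }
}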
|
||||
private void getShardStores(ShardRouting shard, final ActionListener<IndicesShardStoresResponse> listener) {
|
||||
IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName());
|
||||
request.shardStatuses("all");
|
||||
shardStoresAction.execute(request, listener);
|
||||
}
|
||||
}
|
|
@ -61,7 +61,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
     }

     @Override
-    public ClusterHealthRequest indices(String[] indices) {
+    public ClusterHealthRequest indices(String... indices) {
         this.indices = indices;
         return this;
     }
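The only change in this hunk is the parameter type: String[] becomes the varargs String..., which stays compatible with existing array callers while allowing inline arguments. A self-contained sketch of that compatibility, with a stand-in method of the same shape:

class VarargsSketch {
    // Same signature change as the hunk: array callers still compile,
    // and inline arguments become possible.
    static String indices(String... indices) {
        return String.join(",", indices);
    }

    public static void main(String[] args) {
        System.out.println(indices("logs", "metrics"));        // varargs call
        System.out.println(indices(new String[] { "logs" }));  // array call still compiles
    }
}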
@ -19,23 +19,20 @@

 package org.elasticsearch.action.admin.cluster.health;

-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.health.ClusterStateHealth;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.cluster.health.ClusterStateHealth;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.StatusToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
-import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@ -83,14 +80,6 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         return clusterStateHealth;
     }

-    /**
-     * The validation failures on the cluster level (without index validation failures).
-     */
-    public List<String> getValidationFailures() {
-        return clusterStateHealth.getValidationFailures();
-    }
-
     public int getActiveShards() {
         return clusterStateHealth.getActiveShards();
     }
@ -197,9 +186,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         numberOfPendingTasks = in.readInt();
         timedOut = in.readBoolean();
         numberOfInFlightFetch = in.readInt();
-        if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
-            delayedUnassignedShards = in.readInt();
-        }
+        delayedUnassignedShards = in.readInt();
         taskMaxWaitingTime = TimeValue.readTimeValue(in);
     }
@ -212,9 +199,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         out.writeInt(numberOfPendingTasks);
         out.writeBoolean(timedOut);
         out.writeInt(numberOfInFlightFetch);
-        if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
-            out.writeInt(delayedUnassignedShards);
-        }
+        out.writeInt(delayedUnassignedShards);
         taskMaxWaitingTime.writeTo(out);
     }
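These two hunks drop the V_1_7_0 gates: on this branch every peer is recent enough that the field is always on the wire. For context, a minimal sketch of the version-gating pattern being removed, with plain Java streams and an invented version constant standing in for StreamInput/StreamOutput and Version:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class VersionGateSketch {
    // Invented version id; the real code compares against Version.V_1_7_0.
    static final int V_1_7_0 = 1_070_099;

    // Writer side: only emit the field when the peer understands it.
    static void writeDelayed(DataOutputStream out, int peerWireVersion, int delayedUnassignedShards) throws IOException {
        if (peerWireVersion >= V_1_7_0) {
            out.writeInt(delayedUnassignedShards);
        }
    }

    // Reader side must mirror the writer exactly, or the stream desynchronizes.
    static int readDelayed(DataInputStream in, int peerWireVersion) throws IOException {
        return peerWireVersion >= V_1_7_0 ? in.readInt() : 0;
    }
}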
@ -238,25 +223,24 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
     }

     static final class Fields {
-        static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
-        static final XContentBuilderString STATUS = new XContentBuilderString("status");
-        static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out");
-        static final XContentBuilderString NUMBER_OF_NODES = new XContentBuilderString("number_of_nodes");
-        static final XContentBuilderString NUMBER_OF_DATA_NODES = new XContentBuilderString("number_of_data_nodes");
-        static final XContentBuilderString NUMBER_OF_PENDING_TASKS = new XContentBuilderString("number_of_pending_tasks");
-        static final XContentBuilderString NUMBER_OF_IN_FLIGHT_FETCH = new XContentBuilderString("number_of_in_flight_fetch");
-        static final XContentBuilderString DELAYED_UNASSIGNED_SHARDS = new XContentBuilderString("delayed_unassigned_shards");
-        static final XContentBuilderString TASK_MAX_WAIT_TIME_IN_QUEUE = new XContentBuilderString("task_max_waiting_in_queue");
-        static final XContentBuilderString TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = new XContentBuilderString("task_max_waiting_in_queue_millis");
-        static final XContentBuilderString ACTIVE_SHARDS_PERCENT_AS_NUMBER = new XContentBuilderString("active_shards_percent_as_number");
-        static final XContentBuilderString ACTIVE_SHARDS_PERCENT = new XContentBuilderString("active_shards_percent");
-        static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
-        static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
-        static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
-        static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
-        static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
-        static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
-        static final XContentBuilderString INDICES = new XContentBuilderString("indices");
+        static final String CLUSTER_NAME = "cluster_name";
+        static final String STATUS = "status";
+        static final String TIMED_OUT = "timed_out";
+        static final String NUMBER_OF_NODES = "number_of_nodes";
+        static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
+        static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
+        static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
+        static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
+        static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue";
+        static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
+        static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
+        static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent";
+        static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
+        static final String ACTIVE_SHARDS = "active_shards";
+        static final String RELOCATING_SHARDS = "relocating_shards";
+        static final String INITIALIZING_SHARDS = "initializing_shards";
+        static final String UNASSIGNED_SHARDS = "unassigned_shards";
+        static final String INDICES = "indices";
     }

     @Override
@ -280,36 +264,10 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         String level = params.param("level", "cluster");
         boolean outputIndices = "indices".equals(level) || "shards".equals(level);

-        if (!getValidationFailures().isEmpty()) {
-            builder.startArray(Fields.VALIDATION_FAILURES);
-            for (String validationFailure : getValidationFailures()) {
-                builder.value(validationFailure);
-            }
-            // if we don't print index level information, still print the index validation failures
-            // so we know why the status is red
-            if (!outputIndices) {
-                for (ClusterIndexHealth indexHealth : clusterStateHealth.getIndices().values()) {
-                    builder.startObject(indexHealth.getIndex());
-
-                    if (!indexHealth.getValidationFailures().isEmpty()) {
-                        builder.startArray(Fields.VALIDATION_FAILURES);
-                        for (String validationFailure : indexHealth.getValidationFailures()) {
-                            builder.value(validationFailure);
-                        }
-                        builder.endArray();
-                    }
-
-                    builder.endObject();
-                }
-            }
-            builder.endArray();
-        }
-
         if (outputIndices) {
             builder.startObject(Fields.INDICES);
             for (ClusterIndexHealth indexHealth : clusterStateHealth.getIndices().values()) {
-                builder.startObject(indexHealth.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
+                builder.startObject(indexHealth.getIndex());
                 indexHealth.toXContent(builder, params);
                 builder.endObject();
             }
@ -23,17 +23,22 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateObserver;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@ -71,7 +76,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
     }

     @Override
-    protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
+    protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener<ClusterHealthResponse> listener) throws Exception {
+        logger.warn("attempt to execute a cluster health operation without a task");
+        throw new UnsupportedOperationException("task parameter is required for this operation");
+    }
+
+    @Override
+    protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
         if (request.waitForEvents() != null) {
             final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
             clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
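This hunk moves the action to a Task-aware entry point; the old signature survives only as a final method that fails loudly if anything bypasses task registration. A self-contained sketch of that pattern with stand-in types:

class TaskOverloadSketch {
    interface Listener { void done(String result); }

    abstract static class BaseAction {
        // Legacy entry point: final, so subclasses cannot silently resurrect it.
        protected final void operation(String request, Listener listener) {
            throw new UnsupportedOperationException("task parameter is required for this operation");
        }

        // New entry point: the task handle enables tracking and cancellation.
        protected abstract void operation(long taskId, String request, Listener listener);
    }
}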
@ -91,7 +102,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
             @Override
             public void onNoLongerMaster(String source) {
                 logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents());
-                doExecute(request, listener);
+                doExecute(task, request, listener);
             }

             @Override
@ -130,9 +141,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
         }

         assert waitFor >= 0;
-        final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger);
+        final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
         final ClusterState state = observer.observedState();
-        if (waitFor == 0 || request.timeout().millis() == 0) {
+        if (request.timeout().millis() == 0) {
             listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
             return;
         }
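The extra threadPool.getThreadContext() argument lets the observer re-install the calling thread's context when the state-change listener later fires on another thread. A minimal sketch of that capture-and-restore idea, with a plain ThreadLocal standing in for the real ThreadContext:

class ContextRestoreSketch {
    static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "");

    // Capture the caller's context now; re-install it around the deferred callback.
    static Runnable preserving(Runnable callback) {
        String saved = CONTEXT.get();
        return () -> {
            String previous = CONTEXT.get();
            CONTEXT.set(saved);
            try {
                callback.run();
            } finally {
                CONTEXT.set(previous);
            }
        };
    }
}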
@ -202,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
         }
         if (request.indices() != null && request.indices().length > 0) {
             try {
-                indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
+                indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices());
                 waitForCounter++;
             } catch (IndexNotFoundException e) {
                 response.setStatus(ClusterHealthStatus.RED); // no indices, make sure its RED
@ -269,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

         String[] concreteIndices;
         try {
-            concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
+            concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
         } catch (IndexNotFoundException e) {
             // one of the specified indices is not there - treat it as RED.
             ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,
@ -19,9 +19,7 @@

 package org.elasticsearch.action.admin.cluster.node.hotthreads;

-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.unit.TimeValue;
@ -24,8 +24,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@ -102,7 +102,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
     }

     NodeRequest(String nodeId, NodesHotThreadsRequest request) {
-        super(request, nodeId);
+        super(nodeId);
         this.request = request;
     }
@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.http.HttpInfo;
+import org.elasticsearch.ingest.core.IngestInfo;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.os.OsInfo;
 import org.elasticsearch.monitor.process.ProcessInfo;

@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
     @Nullable
     private PluginsAndModules plugins;

-    NodeInfo() {
+    @Nullable
+    private IngestInfo ingest;
+
+    public NodeInfo() {
     }

     public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
                     @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
-                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
+                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
         super(node);
         this.version = version;
         this.build = build;

@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
         this.transport = transport;
         this.http = http;
         this.plugins = plugins;
+        this.ingest = ingest;
     }

     /**

@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
         return this.plugins;
     }

+    @Nullable
+    public IngestInfo getIngest() {
+        return ingest;
+    }
+
     public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
         NodeInfo nodeInfo = new NodeInfo();
         nodeInfo.readFrom(in);

@ -220,6 +230,9 @@ public class NodeInfo extends BaseNodeResponse {
             plugins = new PluginsAndModules();
             plugins.readFrom(in);
         }
+        if (in.readBoolean()) {
+            ingest = new IngestInfo(in);
+        }
     }

     @Override

@ -285,5 +298,11 @@ public class NodeInfo extends BaseNodeResponse {
             out.writeBoolean(true);
             plugins.writeTo(out);
         }
+        if (ingest == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            ingest.writeTo(out);
+        }
     }
 }
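Because the new ingest field is @Nullable, NodeInfo serializes it behind a boolean presence flag, as the readFrom and writeTo hunks above show. The same pattern as a self-contained sketch over plain Java streams:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class OptionalFieldSketch {
    // Write a boolean presence flag, then the payload only when present.
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeUTF(value);
        }
    }

    // The read side is the exact mirror of the write side.
    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}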
@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
     private boolean transport = true;
     private boolean http = true;
     private boolean plugins = true;
+    private boolean ingest = true;

     public NodesInfoRequest() {
     }

@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = false;
         http = false;
         plugins = false;
+        ingest = false;
         return this;
     }

@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = true;
         http = true;
         plugins = true;
+        ingest = true;
         return this;
     }

@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         return plugins;
     }

+    /**
+     * Should information about ingest be returned
+     * @param ingest true if you want info
+     */
+    public NodesInfoRequest ingest(boolean ingest) {
+        this.ingest = ingest;
+        return this;
+    }
+
+    /**
+     * @return true if information about ingest is requested
+     */
+    public boolean ingest() {
+        return ingest;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);

@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = in.readBoolean();
         http = in.readBoolean();
         plugins = in.readBoolean();
+        ingest = in.readBoolean();
     }

     @Override

@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         out.writeBoolean(transport);
         out.writeBoolean(http);
         out.writeBoolean(plugins);
+        out.writeBoolean(ingest);
     }
 }
@ -40,7 +40,7 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
     }

     /**
-     * Sets to reutrn all the data.
+     * Sets to return all the data.
      */
     public NodesInfoRequestBuilder all() {
         request.all();

@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
         request().plugins(plugins);
         return this;
     }
+
+    /**
+     * Should the node ingest info be returned.
+     */
+    public NodesInfoRequestBuilder setIngest(boolean ingest) {
+        request().ingest(ingest);
+        return this;
+    }
 }
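Taken together, the request, builder, and transport hunks let a caller opt in to the new ingest section. A hedged usage sketch, assuming an already-connected Client; clear() and setIngest(true) are the knobs added in this diff:

import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Client;

class IngestInfoUsageSketch {
    // Hypothetical helper; `client` is assumed to be connected to a cluster.
    static NodesInfoResponse ingestOnly(Client client) {
        return client.admin().cluster().prepareNodesInfo()
                .clear()          // switch every section off...
                .setIngest(true)  // ...then ask only for the ingest section
                .get();
    }
}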
@ -19,10 +19,9 @@

 package org.elasticsearch.action.admin.cluster.node.info;

-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.elasticsearch.action.support.nodes.BaseNodesResponse;
 import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;

@ -65,35 +64,40 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To

     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field("cluster_name", getClusterName().value(), XContentBuilder.FieldCaseConversion.NONE);
+        builder.field("cluster_name", getClusterName().value());

         builder.startObject("nodes");
         for (NodeInfo nodeInfo : this) {
-            builder.startObject(nodeInfo.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
+            builder.startObject(nodeInfo.getNode().getId());

-            builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
-            builder.field("transport_address", nodeInfo.getNode().address().toString());
-            builder.field("host", nodeInfo.getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
-            builder.field("ip", nodeInfo.getNode().getHostAddress(), XContentBuilder.FieldCaseConversion.NONE);
+            builder.field("name", nodeInfo.getNode().getName());
+            builder.field("transport_address", nodeInfo.getNode().getAddress().toString());
+            builder.field("host", nodeInfo.getNode().getHostName());
+            builder.field("ip", nodeInfo.getNode().getHostAddress());

             builder.field("version", nodeInfo.getVersion());
             builder.field("build_hash", nodeInfo.getBuild().shortHash());

             if (nodeInfo.getServiceAttributes() != null) {
                 for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
-                    builder.field(nodeAttribute.getKey(), nodeAttribute.getValue(), XContentBuilder.FieldCaseConversion.NONE);
+                    builder.field(nodeAttribute.getKey(), nodeAttribute.getValue());
                 }
             }

-            if (!nodeInfo.getNode().attributes().isEmpty()) {
+            builder.startArray("roles");
+            for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
+                builder.value(role.getRoleName());
+            }
+            builder.endArray();
+
+            if (!nodeInfo.getNode().getAttributes().isEmpty()) {
                 builder.startObject("attributes");
-                for (ObjectObjectCursor<String, String> attr : nodeInfo.getNode().attributes()) {
-                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
+                for (Map.Entry<String, String> entry : nodeInfo.getNode().getAttributes().entrySet()) {
+                    builder.field(entry.getKey(), entry.getValue());
                 }
                 builder.endObject();
             }

             if (nodeInfo.getSettings() != null) {
                 builder.startObject("settings");
                 Settings settings = nodeInfo.getSettings();

@ -122,6 +126,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
             if (nodeInfo.getPlugins() != null) {
                 nodeInfo.getPlugins().toXContent(builder, params);
             }
+            if (nodeInfo.getIngest() != null) {
+                nodeInfo.getIngest().toXContent(builder, params);
+            }

             builder.endObject();
         }
|
|||
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
|
||||
import org.elasticsearch.action.support.nodes.TransportNodesAction;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
|||
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
|
||||
NodesInfoRequest request = nodeRequest.request;
|
||||
return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
|
||||
request.transport(), request.http(), request.plugins());
|
||||
request.transport(), request.http(), request.plugins(), request.ingest());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -95,8 +95,8 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
|||
public NodeInfoRequest() {
|
||||
}
|
||||
|
||||
NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
||||
super(request, nodeId);
|
||||
public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
||||
super(nodeId);
|
||||
this.request = request;
|
||||
}
|
||||
|
||||
|
|
|
@ -48,18 +48,14 @@ public final class LivenessResponse extends ActionResponse {
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         clusterName = ClusterName.readClusterName(in);
-        if (in.readBoolean()) {
-            node = DiscoveryNode.readNode(in);
-        } else {
-            node = null;
-        }
+        node = in.readOptionalWriteable(DiscoveryNode::new);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         clusterName.writeTo(out);
-        out.writeOptionalStreamable(node);
+        out.writeOptionalWriteable(node);
     }

     public ClusterName getClusterName() {
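The LivenessResponse hunk replaces the hand-rolled boolean-then-payload dance with readOptionalWriteable and a constructor reference. A generic sketch of what such a helper encapsulates, again over plain Java streams with a stand-in reader interface:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class OptionalReaderSketch {
    // Stand-in for Writeable.Reader<T>: deserializes one value from a stream.
    interface Reader<T> {
        T read(DataInputStream in) throws IOException;
    }

    // Mirrors readOptionalWriteable: check the presence flag, then delegate.
    static <T> T readOptional(DataInputStream in, Reader<T> reader) throws IOException {
        return in.readBoolean() ? reader.read(in) : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeBoolean(true);
        out.writeUTF("node-1");
        String node = readOptional(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())),
                                   DataInputStream::readUTF);
        System.out.println(node); // node-1
    }
}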
Some files were not shown because too many files have changed in this diff.