commit 4bb5b4100d

    merge from master

@@ -82,4 +82,7 @@
 (c-set-offset 'func-decl-cont '++)
 ))
 (c-basic-offset . 4)
-(c-comment-only-line-offset . (0 . 0)))))
+(c-comment-only-line-offset . (0 . 0))
+(fill-column . 140)
+(fci-rule-column . 140)
+(compile-command . "gradle compileTestJava"))))

@@ -4,12 +4,13 @@
 *.iml
 *.ipr
 *.iws
+build-idea/
 
 # eclipse files
 .project
 .classpath
-eclipse-build
 .settings
+build-eclipse/
 
 # netbeans files
 nb-configuration.xml

@@ -18,7 +19,6 @@ nbactions.xml
 # gradle stuff
 .gradle/
 build/
-generated-resources/
 
 # maven stuff (to be removed when trunk becomes 4.x)
 *-execution-hints.log

@@ -38,5 +38,5 @@ html_docs
 # random old stuff that we should look at the necessity of...
 /tmp/
 backwards/
-
+eclipse-build
 

@@ -16,7 +16,6 @@
-/plugins/discovery-azure/target
-/plugins/discovery-ec2/target
-/plugins/discovery-gce/target
-/plugins/discovery-multicast/target
-/plugins/jvm-example/target
-/plugins/lang-expression/target
-/plugins/lang-groovy/target

build.gradle | 24

@@ -75,8 +75,9 @@ subprojects {
 allprojects {
   // injecting groovy property variables into all projects
   project.ext {
-    // for eclipse hacks...
+    // for ide hacks...
     isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
+    isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
   }
 }

@@ -170,12 +171,14 @@ gradle.projectsEvaluated {
 allprojects {
   apply plugin: 'idea'
 
+  if (isIdea) {
+    project.buildDir = file('build-idea')
+  }
   idea {
     module {
-      // same as for the IntelliJ Gradle tooling integration
       inheritOutputDirs = false
-      outputDir = file('build/classes/main')
-      testOutputDir = file('build/classes/test')
+      outputDir = file('build-idea/classes/main')
+      testOutputDir = file('build-idea/classes/test')
 
       iml {
         // fix so that Gradle idea plugin properly generates support for resource folders

@@ -220,16 +223,25 @@ tasks.idea.dependsOn(buildSrcIdea)
 // eclipse configuration
 allprojects {
   apply plugin: 'eclipse'
+  // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse.
+  if (path != ':') {
+    eclipse.project.name = path
+  }
 
   plugins.withType(JavaBasePlugin) {
-    eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse')
+    File eclipseBuild = project.file('build-eclipse')
+    eclipse.classpath.defaultOutputDir = eclipseBuild
+    if (isEclipse) {
+      // set this so generated dirs will be relative to eclipse build
+      project.buildDir = eclipseBuild
+    }
     eclipse.classpath.file.whenMerged { classpath ->
       // give each source folder a unique corresponding output folder
      int i = 0;
      classpath.entries.findAll { it instanceof SourceFolder }.each { folder ->
        i++;
        // this is *NOT* a path or a file.
-        folder.output = "build/eclipse/" + i
+        folder.output = "build-eclipse/" + i
      }
    }
  }

@@ -76,9 +76,17 @@ extraArchive {
   tests = false
 }
 
+idea {
+  module {
+    inheritOutputDirs = false
+    outputDir = file('build-idea/classes/main')
+    testOutputDir = file('build-idea/classes/test')
+  }
+}
+
 eclipse {
   classpath {
-    defaultOutputDir = new File(file('build'), 'eclipse')
+    defaultOutputDir = file('build-eclipse')
   }
 }

@@ -78,15 +78,17 @@ class BuildPlugin implements Plugin<Project> {
         if (project.rootProject.ext.has('buildChecksDone') == false) {
             String javaHome = findJavaHome()
             File gradleJavaHome = Jvm.current().javaHome
-            String gradleJavaVersionDetails = "${System.getProperty('java.vendor')} ${System.getProperty('java.version')}" +
+            String javaVendor = System.getProperty('java.vendor')
+            String javaVersion = System.getProperty('java.version')
+            String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" +
                 " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]"
 
             String javaVersionDetails = gradleJavaVersionDetails
-            String javaVersion = System.getProperty('java.version')
             JavaVersion javaVersionEnum = JavaVersion.current()
             if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) {
                 javaVersionDetails = findJavaVersionDetails(project, javaHome)
                 javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
+                javaVendor = findJavaVendor(project, javaHome)
+                javaVersion = findJavaVersion(project, javaHome)
             }

@@ -114,6 +116,25 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")
             }
 
+            // this block of code detecting buggy JDK 8 compiler versions can be removed when minimum Java version is incremented
+            assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8"
+            if (javaVersionEnum == JavaVersion.VERSION_1_8) {
+                if (Objects.equals("Oracle Corporation", javaVendor)) {
+                    def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/
+                    if (matcher.matches()) {
+                        int update;
+                        if (matcher.group(1) == null) {
+                            update = 0
+                        } else {
+                            update = matcher.group(1).toInteger()
+                        }
+                        if (update < 40) {
+                            throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40")
+                        }
+                    }
+                }
+            }
+
             project.rootProject.ext.javaHome = javaHome
+            project.rootProject.ext.javaVersion = javaVersion
             project.rootProject.ext.buildChecksDone = true

@@ -153,6 +174,11 @@ class BuildPlugin implements Plugin<Project> {
         return runJavascript(project, javaHome, versionScript)
     }
 
+    private static String findJavaVendor(Project project, String javaHome) {
+        String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));'
+        return runJavascript(project, javaHome, vendorScript)
+    }
+
     /** Finds the parsable java specification version */
     private static String findJavaVersion(Project project, String javaHome) {
         String versionScript = 'print(java.lang.System.getProperty("java.version"));'

@@ -289,7 +315,7 @@ class BuildPlugin implements Plugin<Project> {
         String luceneVersion = VersionProperties.lucene
         if (luceneVersion.contains('-snapshot')) {
             // extract the revision number from the version with a regex matcher
-            String revision = (luceneVersion =~ /\w+-snapshot-(\d+)/)[0][1]
+            String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1]
             repos.maven {
                 name 'lucene-snapshots'
                 url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"

@@ -309,9 +335,10 @@ class BuildPlugin implements Plugin<Project> {
             /*
              * -path because gradle will send in paths that don't always exist.
              * -missing because we have tons of missing @returns and @param.
+             * -serial because we don't use java serialization.
             */
            // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
-            options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
+            options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
            // compile with compact 3 profile by default
            // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
            if (project.compactProfile != 'full') {

@@ -371,6 +398,7 @@ class BuildPlugin implements Plugin<Project> {
             systemProperty 'tests.artifact', project.name
             systemProperty 'tests.task', path
             systemProperty 'tests.security.manager', 'true'
+            systemProperty 'jna.nosys', 'true'
             // default test sysprop values
             systemProperty 'tests.ifNoTests', 'fail'
             systemProperty 'es.logger.level', 'WARN'

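The 8u40 check above boils down to pulling the update field out of an Oracle-style java.version string. A minimal standalone Java sketch of the same parse (class and method names here are illustrative, not part of the build code):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class Jdk8BugCheck {
    // matches Oracle-style version strings such as "1.8.0" and "1.8.0_25";
    // group(1) captures the update number when present
    private static final Pattern VERSION = Pattern.compile("1\\.8\\.0(?:_(\\d+))?");

    /** Returns true when the version is affected by compiler bug JDK-8052388 (fixed in 8u40). */
    static boolean hasCompilerBug(String javaVersion) {
        Matcher matcher = VERSION.matcher(javaVersion);
        if (matcher.matches() == false) {
            return false; // not a recognizable JDK 8 version string
        }
        int update = matcher.group(1) == null ? 0 : Integer.parseInt(matcher.group(1));
        return update < 40;
    }

    public static void main(String[] args) {
        System.out.println(hasCompilerBug("1.8.0_25")); // true
        System.out.println(hasCompilerBug("1.8.0_66")); // false
    }
}
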
@@ -112,8 +112,8 @@ public class PluginBuildPlugin extends BuildPlugin {
                 include 'config/**'
                 include 'bin/**'
             }
-            from('src/site') {
-                include '_site/**'
+            if (project.path.startsWith(':modules:') == false) {
+                into('elasticsearch')
             }
             project.assemble.dependsOn(bundle)

@@ -36,15 +36,9 @@ class PluginPropertiesExtension {
     @Input
     String description
 
-    @Input
-    boolean jvm = true
-
     @Input
     String classname
 
-    @Input
-    boolean site = false
-
     @Input
     boolean isolated = true
 

@@ -29,7 +29,7 @@ import org.gradle.api.tasks.Copy
 class PluginPropertiesTask extends Copy {
 
     PluginPropertiesExtension extension
-    File generatedResourcesDir = new File(project.projectDir, 'generated-resources')
+    File generatedResourcesDir = new File(project.buildDir, 'generated-resources')
 
     PluginPropertiesTask() {
         File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')

@@ -51,11 +51,11 @@ class PluginPropertiesTask extends Copy {
         if (extension.description == null) {
             throw new InvalidUserDataException('description is a required setting for esplugin')
         }
-        if (extension.jvm && extension.classname == null) {
-            throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
+        if (extension.classname == null) {
+            throw new InvalidUserDataException('classname is a required setting for esplugin')
         }
         doFirst {
-            if (extension.jvm && extension.isolated == false) {
+            if (extension.isolated == false) {
                 String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
                 logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
             }

@@ -74,10 +74,8 @@ class PluginPropertiesTask extends Copy {
             'version': extension.version,
             'elasticsearchVersion': VersionProperties.elasticsearch,
             'javaVersion': project.targetCompatibility as String,
-            'jvm': extension.jvm as String,
-            'site': extension.site as String,
             'isolated': extension.isolated as String,
-            'classname': extension.jvm ? extension.classname : 'NA'
+            'classname': extension.classname
         ]
     }
 }

@@ -61,7 +61,6 @@ public class ForbiddenPatternsTask extends DefaultTask {
         // add mandatory rules
         patterns.put('nocommit', /nocommit/)
         patterns.put('tab', /\t/)
-        patterns.put('wildcard imports', /^\s*import.*\.\*/)
 
         inputs.property("excludes", filesFilter.excludes)
         inputs.property("rules", patterns)

@@ -30,9 +30,9 @@ class PrecommitTasks {
 
     /** Adds a precommit task, which depends on non-test verification tasks. */
     public static Task create(Project project, boolean includeDependencyLicenses) {
-
         List<Task> precommitTasks = [
             configureForbiddenApis(project),
+            configureCheckstyle(project),
             project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
             project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
             project.tasks.create('jarHell', JarHellTask.class),

@@ -83,4 +83,30 @@ class PrecommitTasks {
         forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
         return forbiddenApis
     }
+
+    private static Task configureCheckstyle(Project project) {
+        Task checkstyleTask = project.tasks.create('checkstyle')
+        // Apply the checkstyle plugin to create `checkstyleMain` and `checkstyleTest`. It only
+        // creates them if there is main or test code to check and it makes `check` depend
+        // on them. But we want `precommit` to depend on `checkstyle` which depends on them so
+        // we have to swap them.
+        project.pluginManager.apply('checkstyle')
+        URL checkstyleSuppressions = PrecommitTasks.getResource('/checkstyle_suppressions.xml')
+        project.checkstyle {
+            config = project.resources.text.fromFile(
+                PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
+            configProperties = [
+                suppressions: checkstyleSuppressions
+            ]
+        }
+        for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
+            Task task = project.tasks.findByName(taskName)
+            if (task != null) {
+                project.tasks['check'].dependsOn.remove(task)
+                checkstyleTask.dependsOn(task)
+                task.inputs.file(checkstyleSuppressions)
+            }
+        }
+        return checkstyleTask
+    }
 }

@@ -23,6 +23,8 @@ import org.gradle.api.Project
 import org.gradle.api.file.FileCollection
 import org.gradle.api.tasks.Input
 
+import java.time.LocalDateTime
+
 /** Configuration for an elasticsearch cluster, used for integration tests. */
 class ClusterConfiguration {
 

@@ -55,7 +57,7 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://${node.httpUri()}",
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)

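The new wait condition leans on the cluster health API: wait_for_nodes makes the server hold the request until the cluster reaches the expected node count or the wait expires, so a plain GET doubles as a readiness barrier. A hedged Java sketch of the same probe outside of Ant (the URI and node count here are placeholders):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

class ClusterHealthProbe {
    /** Issues a health request that blocks server-side until numNodes nodes have joined. */
    static int waitForNodes(String httpUri, int numNodes) throws IOException {
        URL url = new URL("http://" + httpUri + "/_cluster/health?wait_for_nodes=" + numNodes);
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        connection.setConnectTimeout(5_000);
        connection.setReadTimeout(30_000); // the health call itself blocks server-side
        return connection.getResponseCode(); // 200 when the node count was reached in time
    }
}
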
@@ -46,9 +46,9 @@ class ClusterFormationTasks {
     /**
      * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
      *
-     * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
+     * Returns a NodeInfo object for the first node in the cluster.
      */
-    static Object setup(Project project, Task task, ClusterConfiguration config) {
+    static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
         if (task.getEnabled() == false) {
             // no need to add cluster formation tasks if the task won't run!
             return

@@ -66,7 +66,7 @@ class ClusterFormationTasks {
         task.dependsOn(wait)
 
-        // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
-        return "${-> nodes[0].transportUri()}"
+        return nodes[0]
     }
 
     /** Adds a dependency on the given distribution */

@@ -340,7 +340,7 @@ class ClusterFormationTasks {
         }
         // delay reading the file location until execution time by wrapping in a closure within a GString
         String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
-        Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
+        Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
         return configureExecTask(name, project, setup, node, args)
     }

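The hunks above rely on Groovy's lazy GString form "${-> expr}", which defers evaluating the closure until the string is actually rendered, e.g. when the exec task finally reads its arguments. The closest plain-Java analogue is carrying a Supplier and calling it at use time; a small illustrative sketch (all names here are hypothetical):

import java.io.File;
import java.util.function.Supplier;

class LazyValue {
    // nothing touches the filesystem until get() is called, mirroring the GString trick
    static Supplier<String> pluginUrl(File tmpDir, String zipName) {
        return () -> new File(tmpDir, zipName).toURI().toString(); // evaluated at use time
    }

    public static void main(String[] args) {
        Supplier<String> url = pluginUrl(new File("/tmp/plugins"), "example.zip");
        // ... configuration time passes, files get created ...
        System.out.println(url.get()); // resolved only now, at "execution time"
    }
}
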
@@ -129,7 +129,7 @@ class NodeInfo {
                 'JAVA_HOME' : project.javaHome,
                 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
         ]
-        args.add("-Des.tests.portsfile=true")
+        args.add("-Des.node.portsfile=true")
         args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
         for (Map.Entry<String, String> property : System.properties.entrySet()) {
             if (property.getKey().startsWith('es.')) {

@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.test
 
 import com.carrotsearch.gradle.junit4.RandomizedTestingTask
 import org.elasticsearch.gradle.BuildPlugin
-import org.gradle.api.GradleException
 import org.gradle.api.Task
 import org.gradle.api.internal.tasks.options.Option
 import org.gradle.api.plugins.JavaBasePlugin

@@ -61,8 +60,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         // this must run after all projects have been configured, so we know any project
         // references can be accessed as a fully configured
         project.gradle.projectsEvaluated {
-            Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
-            systemProperty('tests.cluster', clusterUri)
+            NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
+            systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
+            // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
+            // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
+            // both as separate sysprops
+            systemProperty('tests.cluster', "${-> node.transportUri()}")
         }
     }
 

@@ -41,6 +41,7 @@ public class StandaloneTestPlugin implements Plugin<Project> {
         ]
         RandomizedTestingTask test = project.tasks.create(testOptions)
         test.configure(BuildPlugin.commonTestConfig(project))
+        BuildPlugin.configureCompile(project)
         test.classpath = project.sourceSets.test.runtimeClasspath
         test.testClassesDir project.sourceSets.test.output.classesDir
         test.mustRunAfter(project.precommit)

@@ -0,0 +1,60 @@
+<?xml version="1.0"?>
+<!DOCTYPE module PUBLIC
+    "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+    "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+
+<module name="Checker">
+  <property name="charset" value="UTF-8" />
+
+  <module name="SuppressionFilter">
+    <property name="file" value="${suppressions}" />
+  </module>
+
+  <module name="TreeWalker">
+    <!-- Its our official line length! See checkstyle_suppressions.xml for the files that don't pass this. For now we
+      suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
+      unfair. -->
+    <module name="LineLength">
+      <property name="max" value="140"/>
+    </module>
+
+    <module name="AvoidStarImport" />
+    <!-- Doesn't pass but we could make it pass pretty quick.
+    <module name="UnusedImports">
+      The next property is optional. If we remove it then imports that are
+      only referenced by Javadoc cause the check to fail.
+      <property name="processJavadoc" value="true" />
+    </module>
+    -->
+
+    <!-- Non-inner classes must be in files that match their names. -->
+    <module name="OuterTypeFilename" />
+    <!-- No line wraps inside of import and package statements. -->
+    <module name="NoLineWrap" />
+    <!-- Each java file has only one outer class -->
+    <module name="OneTopLevelClass" />
+    <!-- The suffix L is preferred, because the letter l (ell) is often
+      hard to distinguish from the digit 1 (one). -->
+    <module name="UpperEll"/>
+
+    <module name="EqualsHashCode" />
+
+    <!-- We don't use Java's builtin serialization and we suppress all warning
+      about it. The flip side of that coin is that we shouldn't _try_ to use
+      it. We can't outright ban it with ForbiddenApis because it complain about
+      every we reference a class that implements Serializable like String or
+      Exception.
+    -->
+    <module name="RegexpSinglelineJava">
+      <property name="format" value="serialVersionUID" />
+      <property name="message" value="Do not declare serialVersionUID." />
+      <property name="ignoreComments" value="true" />
+    </module>
+    <module name="RegexpSinglelineJava">
+      <property name="format" value="java\.io\.Serializable" />
+      <property name="message" value="References java.io.Serializable." />
+      <property name="ignoreComments" value="true" />
+    </module>
+    <!-- end Orwellian suppression of Serializable -->
+  </module>
+</module>

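For reference, a short hypothetical Java snippet showing the kind of lines the two RegexpSinglelineJava rules above would reject (the class is invented for illustration):

import java.io.Serializable; // flagged: line matches the java\.io\.Serializable pattern

class BadExample implements Serializable {
    private static final long serialVersionUID = 1L; // flagged: line matches the serialVersionUID pattern
}

Because ignoreComments is true, mentioning either token inside a comment alone would not trip the checks.
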
File diff suppressed because it is too large

@@ -128,4 +128,6 @@ java.util.Collections#EMPTY_SET
 java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
 @defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
 java.util.Random#<init>()
 java.util.concurrent.ThreadLocalRandom
+
+java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests

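The intent of banning the unseeded constructs is reproducibility: a test failure can only be replayed if every random draw derives from a reported seed. An illustrative Java sketch of the distinction (the tests.seed property name is just an example here, not the framework's contract):

import java.util.Random;

class ReproducibleRandomness {
    public static void main(String[] args) {
        long seed = Long.getLong("tests.seed", 42L); // a seed printed with a failure can be replayed
        Random random = new Random(seed);            // fine: the sequence is a pure function of the seed
        // new Random() or ThreadLocalRandom.current() would depend on wall-clock/thread state,
        // which is what the signatures above forbid in favor of org.elasticsearch.common.Randomness#get
        System.out.println(random.nextInt(100));
    }
}
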
@@ -1,27 +1,15 @@
 # Elasticsearch plugin descriptor file
-# This file must exist as 'plugin-descriptor.properties' at
-# the root directory of all plugins.
+# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch`
+# inside all plugins.
 #
-# A plugin can be 'site', 'jvm', or both.
-#
-### example site plugin for "foo":
+### example plugin for "foo"
 #
 # foo.zip <-- zip file for the plugin, with this structure:
-#   _site/ <-- the contents that will be served
-#   plugin-descriptor.properties <-- example contents below:
+#|____elasticsearch/
+#| |____   <arbitrary name1>.jar <-- classes, resources, dependencies
+#| |____   <arbitrary nameN>.jar <-- any number of jars
+#| |____   plugin-descriptor.properties <-- example contents below:
 #
-# site=true
-# description=My cool plugin
-# version=1.0
-#
-### example jvm plugin for "foo"
-#
-# foo.zip <-- zip file for the plugin, with this structure:
-#   <arbitrary name1>.jar <-- classes, resources, dependencies
-#   <arbitrary nameN>.jar <-- any number of jars
-#   plugin-descriptor.properties <-- example contents below:
-#
-# jvm=true
 # classname=foo.bar.BazPlugin
 # description=My cool plugin
 # version=2.0

@@ -38,21 +26,6 @@ version=${version}
 #
 # 'name': the plugin name
 name=${name}
-
-### mandatory elements for site plugins:
-#
-# 'site': set to true to indicate contents of the _site/
-#  directory in the root of the plugin should be served.
-site=${site}
-#
-### mandatory elements for jvm plugins :
-#
-# 'jvm': true if the 'classname' class should be loaded
-# from jar files in the root directory of the plugin.
-# Note that only jar files in the root directory are
-# added to the classpath for the plugin! If you need
-# other resources, package them into a resources jar.
-jvm=${jvm}
 #
 # 'classname': the name of the class to load, fully-qualified.
 classname=${classname}

@@ -1,5 +1,5 @@
 elasticsearch = 3.0.0-SNAPSHOT
-lucene = 5.5.0-snapshot-1721183
+lucene = 5.5.0-snapshot-850c6c2
 
 # optional dependencies
 spatial4j = 0.5

@@ -102,8 +102,8 @@ if (isEclipse) {
   }
 }
 
-compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
-compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-serial,-try,-unchecked"
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
 
 forbiddenPatterns {
   exclude '**/*.json'

@@ -36,6 +36,7 @@ import org.apache.lucene.util.InPlaceMergeSorter;
 import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Objects;

@@ -326,7 +327,7 @@ public abstract class BlendedTermQuery extends Query {
             }
             if ((maxTermFrequency >= 1f && docFreqs[i] > maxTermFrequency)
                     || (docFreqs[i] > (int) Math.ceil(maxTermFrequency
-                    * (float) maxDoc))) {
+                    * maxDoc))) {
                 highBuilder.add(query, BooleanClause.Occur.SHOULD);
             } else {
                 lowBuilder.add(query, BooleanClause.Occur.SHOULD);

@@ -362,15 +363,15 @@ public abstract class BlendedTermQuery extends Query {
         return new BlendedTermQuery(terms, boosts) {
             @Override
             protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
-                DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreakerMultiplier);
+                List<Query> queries = new ArrayList<>(ctx.length);
                 for (int i = 0; i < terms.length; i++) {
                     Query query = new TermQuery(terms[i], ctx[i]);
                     if (boosts != null && boosts[i] != 1f) {
                         query = new BoostQuery(query, boosts[i]);
                     }
-                    disMaxQuery.add(query);
+                    queries.add(query);
                 }
-                return disMaxQuery;
+                return new DisjunctionMaxQuery(queries, tieBreakerMultiplier);
             }
         };
     }

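This hunk, and the MapperQueryParser hunks that follow, are the same mechanical migration: Lucene queries became immutable, so a DisjunctionMaxQuery is no longer built by mutating it with add() but constructed once from a collection of disjuncts. A self-contained sketch of the new construction style, assuming the Lucene 5.x API (the boost and tie-breaker values are arbitrary):

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class DisMaxExample {
    /** Builds a dis-max over per-field term queries; 0.1f is an illustrative tie-breaker. */
    static Query acrossFields(String text, String... fields) {
        List<Query> disjuncts = new ArrayList<>();
        for (String field : fields) {
            // boosts are applied by wrapping, since the queries themselves are immutable
            disjuncts.add(new BoostQuery(new TermQuery(new Term(field, text)), 2f));
        }
        // collect first, then construct once: no further mutation is possible
        return new DisjunctionMaxQuery(disjuncts, 0.1f);
    }
}
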
@@ -142,19 +142,19 @@ public class MapperQueryParser extends QueryParser {
             return getFieldQuerySingle(fields.iterator().next(), queryText, quoted);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getFieldQuerySingle(mField, queryText, quoted);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -242,20 +242,20 @@ public class MapperQueryParser extends QueryParser {
         Collection<String> fields = extractMultiFields(field);
         if (fields != null) {
             if (settings.useDisMax()) {
-                DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+                List<Query> queries = new ArrayList<>();
                 boolean added = false;
                 for (String mField : fields) {
                     Query q = super.getFieldQuery(mField, queryText, slop);
                     if (q != null) {
                         added = true;
                         q = applySlop(q, slop);
-                        disMaxQuery.add(applyBoost(mField, q));
+                        queries.add(applyBoost(mField, q));
                     }
                 }
                 if (!added) {
                     return null;
                 }
-                return disMaxQuery;
+                return new DisjunctionMaxQuery(queries, settings.tieBreaker());
             } else {
                 List<BooleanClause> clauses = new ArrayList<>();
                 for (String mField : fields) {

@@ -295,19 +295,19 @@ public class MapperQueryParser extends QueryParser {
         }
 
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -359,19 +359,19 @@ public class MapperQueryParser extends QueryParser {
             return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -422,19 +422,19 @@ public class MapperQueryParser extends QueryParser {
             return getPrefixQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getPrefixQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -552,19 +552,19 @@ public class MapperQueryParser extends QueryParser {
             return getWildcardQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getWildcardQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -681,19 +681,19 @@ public class MapperQueryParser extends QueryParser {
             return getRegexpQuerySingle(fields.iterator().next(), termStr);
         }
         if (settings.useDisMax()) {
-            DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
+            List<Query> queries = new ArrayList<>();
             boolean added = false;
             for (String mField : fields) {
                 Query q = getRegexpQuerySingle(mField, termStr);
                 if (q != null) {
                     added = true;
-                    disMaxQuery.add(applyBoost(mField, q));
+                    queries.add(applyBoost(mField, q));
                 }
             }
             if (!added) {
                 return null;
             }
-            return disMaxQuery;
+            return new DisjunctionMaxQuery(queries, settings.tieBreaker());
         } else {
             List<BooleanClause> clauses = new ArrayList<>();
             for (String mField : fields) {

@@ -19,6 +19,7 @@
 
 package org.elasticsearch;
 
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -39,6 +40,9 @@ import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static java.util.Collections.unmodifiableMap;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
+
 /**
  * A base class for all elasticsearch exceptions.
 */

@@ -49,6 +53,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
     public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
     private static final String INDEX_HEADER_KEY = "es.index";
+    private static final String INDEX_HEADER_KEY_UUID = "es.index_uuid";
     private static final String SHARD_HEADER_KEY = "es.shard";
     private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
     private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";

@@ -70,7 +75,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * The message can be parameterized using <code>{}</code> as placeholders for the given
     * arguments
     *
-     * @param msg the detail message
+     * @param msg  the detail message
     * @param args the arguments for the message
     */
     public ElasticsearchException(String msg, Object... args) {

@@ -202,7 +207,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     * @param exType the exception type to look for
     * @return whether there is a nested exception of the specified type
     */
-    public boolean contains(Class exType) {
+    public boolean contains(Class<? extends Throwable> exType) {
         if (exType == null) {
             return false;
         }

@@ -252,7 +257,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Retruns <code>true</code> iff the given class is a registered for an exception to be read.
+     * Returns <code>true</code> iff the given class is a registered for an exception to be read.
     */
     public static boolean isRegistered(Class<? extends Throwable> exception) {
         return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);

@@ -332,7 +337,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 
     private void xContentHeader(XContentBuilder builder, String key, List<String> values) throws IOException {
         if (values != null && values.isEmpty() == false) {
-            if(values.size() == 1) {
+            if (values.size() == 1) {
                 builder.field(key, values.get(0));
             } else {
                 builder.startArray(key);

@@ -367,18 +372,18 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
 
     /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions
     */
     public ElasticsearchException[] guessRootCauses() {
         final Throwable cause = getCause();
         if (cause != null && cause instanceof ElasticsearchException) {
             return ((ElasticsearchException) cause).guessRootCauses();
         }
-        return new ElasticsearchException[] {this};
+        return new ElasticsearchException[]{this};
     }
 
     /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions.
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions.
     * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array
     * is returned.
     */

@@ -387,7 +392,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         if (ex instanceof ElasticsearchException) {
             return ((ElasticsearchException) ex).guessRootCauses();
         }
-        return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) {
+        return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
             @Override
             protected String getExceptionName() {
                 return getExceptionName(getCause());

@@ -414,7 +419,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public String toString() {
         StringBuilder builder = new StringBuilder();
         if (headers.containsKey(INDEX_HEADER_KEY)) {
-            builder.append('[').append(getIndex()).append(']');
+            builder.append(getIndex());
             if (headers.containsKey(SHARD_HEADER_KEY)) {
                 builder.append('[').append(getShardId()).append(']');
             }

@@ -435,7 +440,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
             final String fileName = in.readOptionalString();
             final String methodName = in.readString();
             final int lineNumber = in.readVInt();
-            stackTrace[i] = new StackTraceElement(declaringClasss,methodName, fileName, lineNumber);
+            stackTrace[i] = new StackTraceElement(declaringClasss, methodName, fileName, lineNumber);
         }
         throwable.setStackTrace(stackTrace);
 

@@ -466,157 +471,288 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         return throwable;
     }
 
+    /**
+     * This is the list of Exceptions Elasticsearch can throw over the wire or save into a corruption marker. Each value in the enum is a
+     * single exception tying the Class to an id for use of the encode side and the id back to a constructor for use on the decode side. As
+     * such its ok if the exceptions to change names so long as their constructor can still read the exception. Each exception is listed
+     * in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in
+     * ExceptionSerializationTests.testIds.ids.
+     */
     enum ElasticsearchExceptionHandle {
-        // each exception gets an assigned id that must never change. While the exception name can
-        // change due to refactorings etc. like renaming we have to keep the ordinal <--> class mapping
-        // to deserialize the exception coming from another node or from an corruption marker on
-        // a corrupted index.
-        // these exceptions can be ordered and removed, but (repeating) the ids must never change
-        // to remove an exception, remove the enum value below, and mark the id as null in ExceptionSerializationTests.testIds.ids
-        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
-        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
-        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
-        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class, org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
-        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class, org.elasticsearch.ElasticsearchSecurityException::new, 4),
-        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
-        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class, org.elasticsearch.indices.IndexClosedException::new, 6),
-        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class, org.elasticsearch.http.BindHttpException::new, 7),
-        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class, org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
-        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class, org.elasticsearch.node.NodeClosedException::new, 9),
-        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
-        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class, org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
-        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class, org.elasticsearch.transport.ConnectTransportException::new, 12),
-        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class, org.elasticsearch.transport.NotSerializableTransportException::new, 13),
-        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
-        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class, org.elasticsearch.indices.IndexCreationException::new, 15),
-        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class, org.elasticsearch.index.IndexNotFoundException::new, 16),
-        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
-        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
-        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
-        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
-        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
+        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
+                org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
+        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,
+                org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
+        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,
+                org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
+        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,
+                org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
+        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,
+                org.elasticsearch.ElasticsearchSecurityException::new, 4),
+        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,
+                org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
+        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,
+                org.elasticsearch.indices.IndexClosedException::new, 6),
+        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,
+                org.elasticsearch.http.BindHttpException::new, 7),
+        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,
+                org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
+        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,
+                org.elasticsearch.node.NodeClosedException::new, 9),
+        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,
+                org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
+        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,
+                org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
+        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,
+                org.elasticsearch.transport.ConnectTransportException::new, 12),
+        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,
+                org.elasticsearch.transport.NotSerializableTransportException::new, 13),
+        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,
+                org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
+        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,
+                org.elasticsearch.indices.IndexCreationException::new, 15),
+        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,
+                org.elasticsearch.index.IndexNotFoundException::new, 16),
+        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,
+                org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
+        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,
+                org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
+        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,
+                org.elasticsearch.ResourceNotFoundException::new, 19),
+        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,
+                org.elasticsearch.transport.ActionTransportException::new, 20),
+        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,
+                org.elasticsearch.ElasticsearchGenerationException::new, 21),
         // 22 was CreateFailedEngineException
-        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
-        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
+        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,
+                org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
+        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
+                org.elasticsearch.search.SearchContextMissingException::new, 24),
         SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
-        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
-        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
-        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
-        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29),
-        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, org.elasticsearch.snapshots.SnapshotException::new, 30),
-        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class, org.elasticsearch.indices.InvalidAliasNameException::new, 31),
-        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class, org.elasticsearch.indices.InvalidIndexNameException::new, 32),
-        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
-        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class, org.elasticsearch.transport.TransportException::new, 34),
-        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class, org.elasticsearch.ElasticsearchParseException::new, 35),
-        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class, org.elasticsearch.search.SearchException::new, 36),
-        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class, org.elasticsearch.index.mapper.MapperException::new, 37),
-        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class, org.elasticsearch.indices.InvalidTypeNameException::new, 38),
-        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class, org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
+        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
+                org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
+        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
+                org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
+        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
+                org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
+        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
+                org.elasticsearch.index.engine.DocumentMissingException::new, 29),
+        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,
+                org.elasticsearch.snapshots.SnapshotException::new, 30),
+        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,
+                org.elasticsearch.indices.InvalidAliasNameException::new, 31),
+        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,
+                org.elasticsearch.indices.InvalidIndexNameException::new, 32),
+        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,
+                org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
+        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,
+                org.elasticsearch.transport.TransportException::new, 34),
+        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,
+                org.elasticsearch.ElasticsearchParseException::new, 35),
+        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,
+                org.elasticsearch.search.SearchException::new, 36),
+        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,
+                org.elasticsearch.index.mapper.MapperException::new, 37),
+        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,
+                org.elasticsearch.indices.InvalidTypeNameException::new, 38),
+        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,
+                org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
         PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),
-        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class, org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
-        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
-        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class, org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
-        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class, org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
-        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class, org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
-        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class, org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
-        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
-        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class, org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
-        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class, org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
-        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
-        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
-        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
+        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,
+                org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
+        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,
+                org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
+        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,
+                org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
+        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,
+                org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
+        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,
+                org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
+        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
+                org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
+        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
+                org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
+        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
+                org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
+        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
+                org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
+        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
+                org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
+        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
+                org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
+        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
+                org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
         // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
         NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
-        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
-        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
-        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class, org.elasticsearch.transport.SendRequestTransportException::new, 58),
-        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
-        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
-        ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
-        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
-        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
+        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,
+                org.elasticsearch.common.settings.SettingsException::new, 56),
+        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,
+                org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
+        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,
+                org.elasticsearch.transport.SendRequestTransportException::new, 58),
+        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
+                org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
+        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
+                org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
+        ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class,
+                org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
+        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
+                org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
+        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
+                org.elasticsearch.indices.AliasFilterParsingException::new, 63),
         // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
         GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
-        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
+        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
+                org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
|
||||
HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
|
||||
ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class, org.elasticsearch.ElasticsearchException::new, 68),
|
||||
SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class, org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
|
||||
PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class, org.elasticsearch.action.PrimaryMissingActionException::new, 70),
|
||||
ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,
|
||||
org.elasticsearch.ElasticsearchException::new, 68),
|
||||
SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,
|
||||
org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
|
||||
PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,
|
||||
org.elasticsearch.action.PrimaryMissingActionException::new, 70),
|
||||
FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),
|
||||
SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),
|
||||
CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
|
||||
BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class, org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
|
||||
INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
|
||||
RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class, org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
|
||||
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
|
||||
TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class, org.elasticsearch.action.TimestampParsingException::new, 78),
|
||||
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79),
|
||||
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
|
||||
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
|
||||
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, org.elasticsearch.repositories.RepositoryException::new, 82),
|
||||
RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
|
||||
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
|
||||
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
|
||||
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
|
||||
CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,
|
||||
org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
|
||||
BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,
|
||||
org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
|
||||
INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,
|
||||
org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
|
||||
RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,
|
||||
org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
|
||||
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,
|
||||
org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
|
||||
TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,
|
||||
org.elasticsearch.action.TimestampParsingException::new, 78),
|
||||
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
|
||||
org.elasticsearch.action.RoutingMissingException::new, 79),
|
||||
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
|
||||
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
|
||||
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
|
||||
org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
|
||||
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,
|
||||
org.elasticsearch.repositories.RepositoryException::new, 82),
|
||||
RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,
|
||||
org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
|
||||
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
|
||||
org.elasticsearch.transport.NodeDisconnectedException::new, 84),
|
||||
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
|
||||
org.elasticsearch.index.AlreadyExpiredException::new, 85),
|
||||
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
|
||||
org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
|
||||
// 87 used to be for MergeMappingException
|
||||
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
|
||||
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
|
||||
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
|
||||
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
|
||||
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
|
||||
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
|
||||
org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
|
||||
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class,
|
||||
org.elasticsearch.percolator.PercolateException::new, 89),
|
||||
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
|
||||
org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
|
||||
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,
|
||||
org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
|
||||
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,
|
||||
org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
|
||||
// 93 used to be for IndexWarmerMissingException
|
||||
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
|
||||
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
|
||||
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
|
||||
INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
|
||||
INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
|
||||
SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class, org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
|
||||
ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class, org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
|
||||
TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class, org.elasticsearch.transport.TransportSerializationException::new, 102),
|
||||
REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class, org.elasticsearch.transport.RemoteTransportException::new, 103),
|
||||
ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class, org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
|
||||
ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class, org.elasticsearch.cluster.routing.RoutingException::new, 105),
|
||||
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class, org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
|
||||
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107),
|
||||
PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.index.percolator.PercolatorException::new, 108),
|
||||
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
|
||||
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
|
||||
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class, org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
|
||||
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class, org.elasticsearch.transport.BindTransportException::new, 112),
|
||||
ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
|
||||
INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class, org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
|
||||
TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class, org.elasticsearch.index.translog.TranslogException::new, 115),
|
||||
PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
|
||||
RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
|
||||
ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.ElasticsearchTimeoutException::new, 118),
|
||||
QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
|
||||
REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class, org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
|
||||
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
|
||||
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
|
||||
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124),
|
||||
HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
|
||||
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126),
|
||||
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, org.elasticsearch.search.SearchContextException::new, 127),
|
||||
SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class, org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
|
||||
ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class, org.elasticsearch.index.engine.EngineClosedException::new, 129),
|
||||
NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class, org.elasticsearch.action.NoShardAvailableActionException::new, 130),
|
||||
UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class, org.elasticsearch.action.UnavailableShardsException::new, 131),
|
||||
FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class, org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
|
||||
CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class, org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
|
||||
NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class, org.elasticsearch.transport.NodeNotConnectedException::new, 134),
|
||||
STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
|
||||
RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
|
||||
TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
|
||||
FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
|
||||
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
|
||||
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,
|
||||
org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
|
||||
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,
|
||||
org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
|
||||
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,
|
||||
org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
|
||||
INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,
|
||||
org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
|
||||
INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,
|
||||
org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
|
||||
SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,
|
||||
org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
|
||||
ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,
|
||||
org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
|
||||
TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,
|
||||
org.elasticsearch.transport.TransportSerializationException::new, 102),
|
||||
REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,
|
||||
org.elasticsearch.transport.RemoteTransportException::new, 103),
|
||||
ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,
|
||||
org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
|
||||
ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,
|
||||
org.elasticsearch.cluster.routing.RoutingException::new, 105),
|
||||
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,
|
||||
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
|
||||
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
|
||||
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
|
||||
PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
|
||||
org.elasticsearch.index.percolator.PercolatorException::new, 108),
|
||||
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
|
||||
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
|
||||
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
|
||||
org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
|
||||
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
|
||||
org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
|
||||
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
|
||||
org.elasticsearch.transport.BindTransportException::new, 112),
|
||||
ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class,
|
||||
org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
|
||||
INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,
|
||||
org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
|
||||
TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,
|
||||
org.elasticsearch.index.translog.TranslogException::new, 115),
|
||||
PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,
|
||||
org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
|
||||
RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class,
|
||||
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
|
||||
ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,
|
||||
org.elasticsearch.ElasticsearchTimeoutException::new, 118),
|
||||
QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,
|
||||
org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
|
||||
REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,
|
||||
org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
|
||||
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,
|
||||
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
|
||||
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
|
||||
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
|
||||
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
|
||||
org.elasticsearch.script.Script.ScriptParseException::new, 124),
|
||||
HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
|
||||
org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
|
||||
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
|
||||
org.elasticsearch.index.mapper.MapperParsingException::new, 126),
|
||||
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
|
||||
org.elasticsearch.search.SearchContextException::new, 127),
|
||||
SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
|
||||
org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
|
||||
ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
|
||||
org.elasticsearch.index.engine.EngineClosedException::new, 129),
|
||||
NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
|
||||
org.elasticsearch.action.NoShardAvailableActionException::new, 130),
|
||||
UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
|
||||
org.elasticsearch.action.UnavailableShardsException::new, 131),
|
||||
FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,
|
||||
org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
|
||||
CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,
|
||||
org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
|
||||
NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,
|
||||
org.elasticsearch.transport.NodeNotConnectedException::new, 134),
|
||||
STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,
|
||||
org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
|
||||
RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,
|
||||
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
|
||||
TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,
|
||||
org.elasticsearch.indices.TypeMissingException::new, 137),
|
||||
FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,
|
||||
org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
|
||||
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
|
||||
org.elasticsearch.index.query.QueryShardException::new, 141),
|
||||
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
|
||||
ShardStateAction.NoLongerPrimaryShardException::new, 142);
|
||||
|
||||
|
||||
final Class<? extends ElasticsearchException> exceptionClass;
final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
final int id;

ElasticsearchExceptionHandle(Class<? extends ElasticsearchException> exceptionClass, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor, int id) {
<E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {
// We need the exceptionClass because you can't dig it out of the constructor reliably.
this.exceptionClass = exceptionClass;
this.constructor = constructor;
this.id = id;
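
The hunks above bind each concrete exception class to a stream constructor and a stable numeric id, so an exception can cross the wire as its id and be rebuilt by the receiving node. A minimal, self-contained sketch of the same id-to-constructor registry (names and types here are illustrative, not this commit's code):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    class RegistryDemo {
        // Stand-ins for the real StreamInput-reading constructors.
        enum Handle {
            TIMEOUT(IllegalStateException::new, 1),
            MISSING(IllegalArgumentException::new, 2);

            final Function<String, ? extends RuntimeException> constructor;
            final int id;

            Handle(Function<String, ? extends RuntimeException> constructor, int id) {
                this.constructor = constructor;
                this.id = id;
            }
        }

        // Built once from the enum, like ID_TO_SUPPLIER in the static block below.
        static final Map<Integer, Function<String, ? extends RuntimeException>> ID_TO_CTOR =
                Collections.unmodifiableMap(Arrays.stream(Handle.values())
                        .collect(Collectors.toMap(h -> h.id, h -> h.constructor)));

        public static void main(String[] args) {
            // "Deserialize" id 2 back into an exception instance.
            RuntimeException e = ID_TO_CTOR.get(2).apply("rebuilt from the wire");
            System.out.println(e.getClass().getSimpleName() + ": " + e.getMessage());
        }
    }
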
@ -624,17 +760,17 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

static {
final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> exceptions = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e));
final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> idToSupplier = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor));

ID_TO_SUPPLIER = Collections.unmodifiableMap(idToSupplier);
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
ID_TO_SUPPLIER = unmodifiableMap(Arrays
.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)));
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = unmodifiableMap(Arrays
.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e)));
}

public String getIndex() {
public Index getIndex() {
List<String> index = getHeader(INDEX_HEADER_KEY);
if (index != null && index.isEmpty() == false) {
return index.get(0);
List<String> index_uuid = getHeader(INDEX_HEADER_KEY_UUID);
return new Index(index.get(0), index_uuid.get(0));
}

return null;
@ -651,22 +787,28 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
public void setIndex(Index index) {
if (index != null) {
addHeader(INDEX_HEADER_KEY, index.getName());
addHeader(INDEX_HEADER_KEY_UUID, index.getUUID());
}
}

public void setIndex(String index) {
if (index != null) {
addHeader(INDEX_HEADER_KEY, index);
setIndex(new Index(index, INDEX_UUID_NA_VALUE));
}
}

public void setShard(ShardId shardId) {
if (shardId != null) {
addHeader(INDEX_HEADER_KEY, shardId.getIndex());
setIndex(shardId.getIndex());
addHeader(SHARD_HEADER_KEY, Integer.toString(shardId.id()));
}
}

public void setShard(String index, int shardId) {
setIndex(index);
addHeader(SHARD_HEADER_KEY, Integer.toString(shardId));
}

public void setResources(String type, String... id) {
assert type != null;
addHeader(RESOURCE_HEADER_ID_KEY, id);
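
The getIndex/setIndex/setShard hunks above move the exception's index context from a bare name header to a name-plus-UUID pair. A small, self-contained sketch of that round trip, with a plain map standing in for the exception's header store (the header key names and the _na_ sentinel are assumptions for illustration):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class IndexHeaderRoundTrip {
        static final String INDEX_HEADER_KEY = "es.index";           // assumed key name
        static final String INDEX_HEADER_KEY_UUID = "es.index_uuid"; // assumed key name
        static final String INDEX_UUID_NA_VALUE = "_na_";            // assumed sentinel

        final Map<String, List<String>> headers = new HashMap<>();

        // Mirrors the new setIndex(Index): always writes both headers.
        void setIndex(String name, String uuid) {
            headers.put(INDEX_HEADER_KEY, Arrays.asList(name));
            headers.put(INDEX_HEADER_KEY_UUID, Arrays.asList(uuid));
        }

        // Mirrors the new getIndex(): name and uuid come back together.
        String[] getIndex() {
            List<String> index = headers.get(INDEX_HEADER_KEY);
            if (index != null && index.isEmpty() == false) {
                List<String> uuid = headers.get(INDEX_HEADER_KEY_UUID);
                return new String[] { index.get(0), uuid.get(0) };
            }
            return null;
        }

        public static void main(String[] args) {
            IndexHeaderRoundTrip h = new IndexHeaderRoundTrip();
            h.setIndex("logs", INDEX_UUID_NA_VALUE); // name known, uuid not, as in setIndex(String)
            System.out.println(Arrays.toString(h.getIndex()));
        }
    }
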
@ -691,9 +833,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
builder.field("root_cause");
builder.startArray();
for (ElasticsearchException rootCause : rootCauses){
for (ElasticsearchException rootCause : rootCauses) {
builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(
Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject();
}
builder.endArray();

@ -26,6 +26,7 @@ import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
@ -243,7 +244,12 @@ public final class ExceptionsHelper {

public GroupBy(Throwable t) {
if (t instanceof ElasticsearchException) {
index = ((ElasticsearchException) t).getIndex();
final Index index = ((ElasticsearchException) t).getIndex();
if (index != null) {
this.index = index.getName();
} else {
this.index = null;
}
} else {
index = null;
}

@ -22,15 +22,15 @@ package org.elasticsearch;
import java.security.BasicPermission;

/**
* Elasticsearch-specific permission to check before entering
* {@code AccessController.doPrivileged()} blocks.
* Elasticsearch-specific permission to check before entering
* {@code AccessController.doPrivileged()} blocks.
* <p>
* We try to avoid these blocks in our code and keep security simple,
* but we need them for a few special places to contain hacks for third
* We try to avoid these blocks in our code and keep security simple,
* but we need them for a few special places to contain hacks for third
* party code, or dangerous things used by scripting engines.
* <p>
* All normal code has this permission, but checking this before truncating the stack
* prevents unprivileged code (e.g. scripts), which do not have it, from gaining elevated
* prevents unprivileged code (e.g. scripts), which do not have it, from gaining elevated
* privileges.
* <p>
* In other words, don't do this:
@ -57,9 +57,6 @@ import java.security.BasicPermission;
* </code></pre>
*/
public final class SpecialPermission extends BasicPermission {

private static final long serialVersionUID = -4129500096157408168L;

/**
* Creates a new SpecialPermision object.
*/
@ -68,11 +65,11 @@ public final class SpecialPermission extends BasicPermission {
// but let's just keep it simple if we can.
super("*");
}


/**
* Creates a new SpecialPermission object.
* This constructor exists for use by the {@code Policy} object to instantiate new Permission objects.
*
*
* @param name ignored
* @param actions ignored
*/

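The javadoc above spells out the intended call pattern: check SpecialPermission before entering a doPrivileged block, so that unprivileged callers (e.g. scripts) on the stack fail the check instead of riding into privileged code. A minimal sketch of that idiom against the SecurityManager API of this era (the privileged work shown is a made-up example, not code from this commit):

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    class PrivilegedCallDemo {
        static String readSystemProperty(String key) {
            SecurityManager sm = System.getSecurityManager();
            if (sm != null) {
                // Throws SecurityException for callers lacking SpecialPermission,
                // stopping the stack truncation before it grants anything.
                sm.checkPermission(new org.elasticsearch.SpecialPermission());
            }
            return AccessController.doPrivileged(
                    (PrivilegedAction<String>) () -> System.getProperty(key));
        }
    }
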
@ -254,7 +254,11 @@ public class Version {
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_4_ID = 1070499;
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_5_ID = 1070599;
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_6_ID = 1070699;
public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);

public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
@ -275,11 +279,15 @@ public class Version {
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_2_ID = 2010299;
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_3_ID = 2010399;
public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_3_0_0_ID = 3000099;
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version CURRENT = V_3_0_0;
@ -299,8 +307,12 @@ public class Version {
return V_3_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_3_ID:
return V_2_1_3;
case V_2_1_2_ID:
return V_2_1_2;
case V_2_1_1_ID:
@ -321,6 +333,10 @@ public class Version {
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_6_ID:
return V_1_7_6;
case V_1_7_5_ID:
return V_1_7_5;
case V_1_7_4_ID:
return V_1_7_4;
case V_1_7_3_ID:

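The V_*_ID constants above use a packed decimal layout: two digits each for major, minor and revision, plus a trailing build value where 99 marks a GA release (so 1070399 reads as 1.7.3 and 2000001 as 2.0.0-beta1). A small sketch that unpacks an id on that reading (the beta-suffix formatting is an assumption for illustration, not Version.java's actual logic):

    class VersionIdDemo {
        static String describe(int id) {
            int major = id / 1000000;
            int minor = (id / 10000) % 100;
            int revision = (id / 100) % 100;
            int build = id % 100;
            String base = major + "." + minor + "." + revision;
            return build == 99 ? base : base + "-beta" + build; // assumed suffix convention
        }

        public static void main(String[] args) {
            System.out.println(describe(1070399)); // 1.7.3
            System.out.println(describe(2000001)); // 2.0.0-beta1
            System.out.println(describe(3000099)); // 3.0.0
        }
    }
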
@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
@ -149,6 +151,16 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.get.TransportGetIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.put.TransportPutIndexedScriptAction;
import org.elasticsearch.action.ingest.IngestActionFilter;
import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineTransportAction;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
@ -180,7 +192,6 @@ import org.elasticsearch.action.termvectors.TermVectorsAction;
import org.elasticsearch.action.termvectors.TransportMultiTermVectorsAction;
import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction;
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.common.inject.AbstractModule;
@ -200,7 +211,7 @@ public class ActionModule extends AbstractModule {
private final Map<String, ActionEntry> actions = new HashMap<>();
private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();

static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
public final GenericAction<Request, Response> action;
public final Class<? extends TransportAction<Request, Response>> transportAction;
public final Class[] supportTransportActions;
@ -210,13 +221,13 @@ public class ActionModule extends AbstractModule {
this.transportAction = transportAction;
this.supportTransportActions = supportTransportActions;
}

}

private final boolean ingestEnabled;
private final boolean proxy;

public ActionModule(boolean proxy) {
public ActionModule(boolean ingestEnabled, boolean proxy) {
this.ingestEnabled = ingestEnabled;
this.proxy = proxy;
}

@ -229,7 +240,7 @@ public class ActionModule extends AbstractModule {
* @param <Request> The request type.
* @param <Response> The response type.
*/
public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
}

@ -240,6 +251,13 @@ public class ActionModule extends AbstractModule {

@Override
protected void configure() {
if (proxy == false) {
if (ingestEnabled) {
registerFilter(IngestActionFilter.class);
} else {
registerFilter(IngestProxyActionFilter.class);
}
}

Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
for (Class<? extends ActionFilter> actionFilter : actionFilters) {
@ -252,6 +270,7 @@ public class ActionModule extends AbstractModule {
registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
@ -304,8 +323,7 @@ public class ActionModule extends AbstractModule {

registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
registerAction(GetAction.INSTANCE, TransportGetAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class,
TransportDfsOnlyAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);
@ -340,6 +358,11 @@ public class ActionModule extends AbstractModule {

registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class);

registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);

// register Name -> GenericAction Map that can be injected to instances.
MapBinder<String, GenericAction> actionsBinder
= MapBinder.newMapBinder(binder(), String.class, GenericAction.class);

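Two things happen in the ActionModule hunks above: configure() now picks an ingest filter (the real IngestActionFilter on ingest-enabled nodes, a proxying one otherwise, and neither on transport-client "proxy" modules), and registerAction() keeps filling a name-keyed registry that the MapBinder then exposes for injection. The registry half reduces to a plain map from action name to handler pair; a self-contained sketch of that shape (stand-in types, not the actual Guice wiring):

    import java.util.HashMap;
    import java.util.Map;

    class ActionRegistryDemo {
        static class Entry {
            final String name;
            final Class<?> transportAction;

            Entry(String name, Class<?> transportAction) {
                this.name = name;
                this.transportAction = transportAction;
            }
        }

        final Map<String, Entry> actions = new HashMap<>();

        void registerAction(String name, Class<?> transportAction) {
            actions.put(name, new Entry(name, transportAction));
        }

        public static void main(String[] args) {
            ActionRegistryDemo module = new ActionRegistryDemo();
            // Name taken from CancelTasksAction.NAME, added later in this diff.
            module.registerAction("cluster:admin/tasks/cancel", Object.class);
            System.out.println(module.actions.keySet());
        }
    }
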
@ -28,14 +28,10 @@ import java.io.IOException;
/**
*
*/
public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {

public ActionRequest() {
super();
}

protected ActionRequest(ActionRequest request) {
super(request);
// this does not set the listenerThreaded API, if needed, its up to the caller to set it
// since most times, we actually want it to not be threaded...
// this.listenerThreaded = request.listenerThreaded();

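The signature change above, from ActionRequest&lt;T extends ActionRequest&gt; to ActionRequest&lt;Request extends ActionRequest&lt;Request&gt;&gt;, adopts the self-referential ("curiously recurring") bound: base-class methods can now return the concrete request type, so builder-style calls chain without casts at the call site. A toy illustration of why the recursive bound matters (these classes are illustrative, not the real request hierarchy):

    class CrtpDemo {
        abstract static class Request<R extends Request<R>> {
            long timeoutMillis;

            @SuppressWarnings("unchecked")
            R timeout(long millis) {
                this.timeoutMillis = millis;
                return (R) this; // safe: R is always the concrete subclass
            }
        }

        static class PingRequest extends Request<PingRequest> {
            PingRequest node(String node) {
                return this;
            }
        }

        public static void main(String[] args) {
            // timeout() already returns PingRequest, so the chain needs no cast.
            PingRequest r = new PingRequest().timeout(500).node("node-1");
            System.out.println(r.timeoutMillis);
        }
    }
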
@ -49,12 +49,6 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
return this.request;
}

@SuppressWarnings("unchecked")
public final RequestBuilder putHeader(String key, Object value) {
request.putHeader(key, value);
return (RequestBuilder) this;
}

public ListenableActionFuture<Response> execute() {
PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
execute(future);

@ -56,7 +56,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
* The index the document was changed in.
*/
public String getIndex() {
return this.shardId.getIndex();
return this.shardId.getIndexName();
}


@ -89,8 +89,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
}

/**
* Returns the sequence number assigned for this change. Returns {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if the operation wasn't
* performed (i.e., an update operation that resulted in a NOOP).
* Returns the sequence number assigned for this change. Returns {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if the operation
* wasn't performed (i.e., an update operation that resulted in a NOOP).
*/
public long getSeqNo() {
return seqNo;
@ -133,10 +133,10 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
ReplicationResponse.ShardInfo shardInfo = getShardInfo();
builder.field(Fields._INDEX, shardId.getIndex())
.field(Fields._TYPE, type)
.field(Fields._ID, id)
.field(Fields._VERSION, version);
builder.field(Fields._INDEX, shardId.getIndexName())
.field(Fields._TYPE, type)
.field(Fields._ID, id)
.field(Fields._VERSION, version);
shardInfo.toXContent(builder, params);
//nocommit: i'm not sure we want to expose it in the api but it will be handy for debugging while we work...
builder.field(Fields._SHARD_ID, shardId.id());

@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
@ -169,15 +170,13 @@ public class ReplicationResponse extends ActionResponse {

public static class Failure implements ShardOperationFailedException, ToXContent {

private String index;
private int shardId;
private ShardId shardId;
private String nodeId;
private Throwable cause;
private RestStatus status;
private boolean primary;

public Failure(String index, int shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
this.index = index;
public Failure(ShardId shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) {
this.shardId = shardId;
this.nodeId = nodeId;
this.cause = cause;
@ -193,7 +192,7 @@ public class ReplicationResponse extends ActionResponse {
*/
@Override
public String index() {
return index;
return shardId.getIndexName();
}

/**
@ -201,6 +200,10 @@ public class ReplicationResponse extends ActionResponse {
*/
@Override
public int shardId() {
return shardId.id();
}

public ShardId fullShardId() {
return shardId;
}

@ -243,8 +246,7 @@ public class ReplicationResponse extends ActionResponse {

@Override
public void readFrom(StreamInput in) throws IOException {
index = in.readString();
shardId = in.readVInt();
shardId = ShardId.readShardId(in);
nodeId = in.readOptionalString();
cause = in.readThrowable();
status = RestStatus.readFrom(in);
@ -253,8 +255,7 @@ public class ReplicationResponse extends ActionResponse {

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeVInt(shardId);
shardId.writeTo(out);
out.writeOptionalString(nodeId);
out.writeThrowable(cause);
RestStatus.writeTo(out, status);
@ -264,8 +265,8 @@ public class ReplicationResponse extends ActionResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields._INDEX, index);
builder.field(Fields._SHARD, shardId);
builder.field(Fields._INDEX, shardId.getIndexName());
builder.field(Fields._SHARD, shardId.id());
builder.field(Fields._NODE, nodeId);
builder.field(Fields.REASON);
builder.startObject();

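The Failure hunks above collapse the separate index-name string and shard integer into a single ShardId written and read as one unit; as with any Streamable, readFrom must consume exactly what writeTo produced, field for field and in the same order, or the stream desynchronizes. A minimal sketch of that contract using plain java.io streams (ShardIdLike stands in for org.elasticsearch.index.shard.ShardId):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    class StreamSymmetryDemo {
        static class ShardIdLike {
            final String indexName;
            final int id;

            ShardIdLike(String indexName, int id) {
                this.indexName = indexName;
                this.id = id;
            }

            void writeTo(DataOutputStream out) throws IOException {
                out.writeUTF(indexName);
                out.writeInt(id);
            }

            static ShardIdLike readFrom(DataInputStream in) throws IOException {
                return new ShardIdLike(in.readUTF(), in.readInt()); // same order as writeTo
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            new ShardIdLike("logs", 3).writeTo(new DataOutputStream(bytes));
            ShardIdLike copy = ShardIdLike.readFrom(
                    new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println("[" + copy.indexName + "][" + copy.id + "]");
        }
    }
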
@ -36,13 +36,19 @@ public class UnavailableShardsException extends ElasticsearchException {
super(buildMessage(shardId, message), args);
}

public UnavailableShardsException(String index, int shardId, String message, Object... args) {
super(buildMessage(index, shardId, message), args);
}

private static String buildMessage(ShardId shardId, String message) {
if (shardId == null) {
return message;
}
return "[" + shardId.index().name() + "][" + shardId.id() + "] " + message;
return buildMessage(shardId.getIndexName(), shardId.id(), message);
}

private static String buildMessage(String index, int shardId, String message) {return "[" + index + "][" + shardId + "] " + message;}

public UnavailableShardsException(StreamInput in) throws IOException {
super(in);
}

@ -141,7 +141,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}

assert waitFor >= 0;
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger);
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
final ClusterState state = observer.observedState();
if (waitFor == 0 || request.timeout().millis() == 0) {
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));

@ -102,7 +102,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
}

NodeRequest(String nodeId, NodesHotThreadsRequest request) {
super(request, nodeId);
super(nodeId);
this.request = request;
}

@ -40,7 +40,7 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
}

/**
* Sets to reutrn all the data.
* Sets to return all the data.
*/
public NodesInfoRequestBuilder all() {
request.all();

@ -96,7 +96,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
}

NodeInfoRequest(String nodeId, NodesInfoRequest request) {
super(request, nodeId);
super(nodeId);
this.request = request;
}

@ -96,7 +96,7 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
}

NodeStatsRequest(String nodeId, NodesStatsRequest request) {
super(request, nodeId);
super(nodeId);
this.request = request;
}

@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
* Action for cancelling running tasks
*/
public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {

public static final CancelTasksAction INSTANCE = new CancelTasksAction();
public static final String NAME = "cluster:admin/tasks/cancel";

private CancelTasksAction() {
super(NAME);
}

@Override
public CancelTasksResponse newResponse() {
return new CancelTasksResponse();
}

@Override
public CancelTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new CancelTasksRequestBuilder(client, this);
}
}

@@ -17,57 +17,57 @@
 * under the License.
 */

package org.elasticsearch.action.termvectors.dfs;
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;

import java.io.IOException;
import java.util.List;

/**
 * A response of a dfs only request.
 * A request to cancel tasks
 */
public class DfsOnlyResponse extends BroadcastResponse {
public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {

    private AggregatedDfs dfs;
    private long tookInMillis;
    public static final String DEFAULT_REASON = "by user request";

    DfsOnlyResponse(AggregatedDfs dfs, int totalShards, int successfulShards, int failedShards,
                    List<ShardOperationFailedException> shardFailures, long tookInMillis) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.dfs = dfs;
        this.tookInMillis = tookInMillis;
    }
    private String reason = DEFAULT_REASON;

    public AggregatedDfs getDfs() {
        return dfs;
    }

    public TimeValue getTook() {
        return new TimeValue(tookInMillis);
    }

    public long getTookInMillis() {
        return tookInMillis;
    /**
     * Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
     * all nodes will be cancelled.
     */
    public CancelTasksRequest(String... nodesIds) {
        super(nodesIds);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        AggregatedDfs.readAggregatedDfs(in);
        tookInMillis = in.readVLong();
        reason = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        dfs.writeTo(out);
        out.writeVLong(tookInMillis);
        out.writeString(reason);

    }

    @Override
    public boolean match(Task task) {
        return super.match(task) && task instanceof CancellableTask;
    }

    public CancelTasksRequest reason(String reason) {
        this.reason = reason;
        return this;
    }

    public String reason() {
        return reason;
    }
}

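CancelTasksRequest carries a human-readable reason (defaulting to "by user request") and narrows match() so that only CancellableTask instances are touched. A minimal sketch of building one, with hypothetical node ids:

import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;

public class CancelRequestSketch {
    public static CancelTasksRequest maintenanceCancel() {
        // Node ids are hypothetical; passing none fans the request out to all nodes.
        return new CancelTasksRequest("node-1", "node-2")
            .reason("cluster maintenance window"); // replaces DEFAULT_REASON ("by user request")
    }
}
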
@@ -0,0 +1,34 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for the request to cancel tasks running on the specified nodes
 */
public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {

    public CancelTasksRequestBuilder(ElasticsearchClient client, CancelTasksAction action) {
        super(client, action, new CancelTasksRequest());
    }

}

@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;

import java.util.List;

/**
 * Returns the list of tasks that were cancelled
 */
public class CancelTasksResponse extends ListTasksResponse {

    public CancelTasksResponse() {
    }

    public CancelTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException>
        nodeFailures) {
        super(tasks, taskFailures, nodeFailures);
    }

}

@@ -0,0 +1,285 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

/**
 * Transport action that can be used to cancel currently running cancellable tasks.
 * <p>
 * For a task to be cancellable it has to return an instance of
 * {@link CancellableTask} from {@link TransportRequest#createTask(long, String, String)}
 */
public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> {

    public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban";

    @Inject
    public TransportCancelTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver
                                          indexNameExpressionResolver) {
        super(settings, CancelTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
            indexNameExpressionResolver, CancelTasksRequest::new, CancelTasksResponse::new, ThreadPool.Names.MANAGEMENT);
        transportService.registerRequestHandler(BAN_PARENT_ACTION_NAME, BanParentTaskRequest::new, ThreadPool.Names.SAME, new
            BanParentRequestHandler());
    }

    @Override
    protected CancelTasksResponse newResponse(CancelTasksRequest request, List<TaskInfo> tasks, List<TaskOperationFailure>
        taskOperationFailures, List<FailedNodeException> failedNodeExceptions) {
        return new CancelTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
    }

    @Override
    protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
        if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
            // we are only checking one task, we can optimize it
            CancellableTask task = taskManager.getCancellableTask(request.taskId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept(task);
                } else {
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
                }
            } else {
                if (taskManager.getTask(request.taskId()) != null) {
                    // The task exists, but doesn't support cancellation
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
                } else {
                    throw new ResourceNotFoundException("task [{}] is not found", request.taskId());
                }
            }
        } else {
            for (CancellableTask task : taskManager.getCancellableTasks().values()) {
                if (request.match(task)) {
                    operation.accept(task);
                }
            }
        }
    }

    @Override
    protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
        final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
        if (childNodes != null) {
            if (childNodes.isEmpty()) {
                logger.trace("cancelling task {} with no children", cancellableTask.getId());
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            } else {
                logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            }
        } else {
            logger.trace("task {} is already cancelled", cancellableTask.getId());
            throw new IllegalStateException("task with id " + cancellableTask.getId() + " is already cancelled");
        }
    }

    @Override
    protected boolean accumulateExceptions() {
        return true;
    }

    private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
        sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
    }

    private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
        sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
    }

    private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
        ClusterState clusterState = clusterService.state();
        for (String node : nodes) {
            DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
            if (discoveryNode != null) {
                // Check if node still in the cluster
                logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]", request.parentNodeId, request
                    .parentTaskId, node, request.ban);
                transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
                    new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                        @Override
                        public void handleResponse(TransportResponse.Empty response) {
                            banLock.onBanSet();
                        }

                        @Override
                        public void handleException(TransportException exp) {
                            banLock.onBanSet();
                        }
                    });
            } else {
                banLock.onBanSet();
                logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node no longer in the cluster",
                    request.parentNodeId, request.parentTaskId, node);
            }
        }
    }

    private void sendRemoveBanRequest(Set<String> nodes, BanParentTaskRequest request) {
        ClusterState clusterState = clusterService.state();
        for (String node : nodes) {
            DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
            if (discoveryNode != null) {
                // Check if node still in the cluster
                logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
                    request.parentTaskId, node);
                transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler
                    .INSTANCE_SAME);
            } else {
                logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node no longer in " +
                    "the cluster", request.parentNodeId, request.parentTaskId, node);
            }
        }
    }

    private static class BanLock {
        private final Consumer<Set<String>> finish;
        private final AtomicInteger counter;
        private final AtomicReference<Set<String>> nodes = new AtomicReference<>();

        public BanLock(Consumer<Set<String>> finish) {
            counter = new AtomicInteger(0);
            this.finish = finish;
        }

        public void onBanSet() {
            if (counter.decrementAndGet() == 0) {
                finish();
            }
        }

        public void onTaskFinished(Set<String> nodes) {
            this.nodes.set(nodes);
            if (counter.addAndGet(nodes.size()) == 0) {
                finish();
            }
        }

        public void finish() {
            finish.accept(nodes.get());
        }

    }

    private static class BanParentTaskRequest extends TransportRequest {

        private String parentNodeId;

        private long parentTaskId;

        private boolean ban;

        private String reason;

        BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
            this.parentNodeId = parentNodeId;
            this.parentTaskId = parentTaskId;
            this.ban = true;
            this.reason = reason;
        }

        BanParentTaskRequest(String parentNodeId, long parentTaskId) {
            this.parentNodeId = parentNodeId;
            this.parentTaskId = parentTaskId;
            this.ban = false;
        }

        public BanParentTaskRequest() {
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            parentNodeId = in.readString();
            parentTaskId = in.readLong();
            ban = in.readBoolean();
            if (ban) {
                reason = in.readString();
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(parentNodeId);
            out.writeLong(parentTaskId);
            out.writeBoolean(ban);
            if (ban) {
                out.writeString(reason);
            }
        }
    }

    class BanParentRequestHandler implements TransportRequestHandler<BanParentTaskRequest> {
        @Override
        public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
            if (request.ban) {
                logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId, request
                    .parentTaskId, clusterService.localNode().getId(), request.reason);
                taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
            } else {
                logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
                    clusterService.localNode().getId());
                taskManager.removeBan(request.parentNodeId, request.parentTaskId);
            }
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }
    }

}

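The BanLock above is the subtle piece: its counter starts at zero, every ban acknowledgement decrements it, and onTaskFinished adds the child-node count once the cancelled task itself completes, so whichever event returns the counter to zero fires finish() and removes the bans. A standalone model of that rendezvous (plain Java, no Elasticsearch types):

import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

// Standalone model of the BanLock rendezvous: onAllDone runs exactly once,
// after both (a) every ban was acknowledged and (b) the task reported its children.
class BanRendezvous {
    private final Consumer<Set<String>> onAllDone;
    private final AtomicInteger pending = new AtomicInteger(0);
    private final AtomicReference<Set<String>> childNodes = new AtomicReference<>();

    BanRendezvous(Consumer<Set<String>> onAllDone) {
        this.onAllDone = onAllDone;
    }

    void banAcknowledged() {
        if (pending.decrementAndGet() == 0) {   // may go negative first; zero means "all events in"
            onAllDone.accept(childNodes.get());
        }
    }

    void taskFinished(Set<String> nodes) {
        childNodes.set(nodes);
        if (pending.addAndGet(nodes.size()) == 0) { // acks already arrived: the sum lands back on zero
            onAllDone.accept(childNodes.get());
        }
    }
}
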
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;

@@ -104,15 +105,19 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
            builder.startArray("task_failures");
            for (TaskOperationFailure ex : getTaskFailures()){
                builder.startObject();
                builder.value(ex);
                builder.endObject();
            }
            builder.endArray();
        }

        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
            builder.startArray("node_failures");
            for (FailedNodeException ex : getNodeFailures()){
                builder.value(ex);
            for (FailedNodeException ex : getNodeFailures()) {
                builder.startObject();
                ex.toXContent(builder, params);
                builder.endObject();
            }
            builder.endArray();
        }

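The fix wraps each node failure in its own object and delegates to ex.toXContent(builder, params) instead of builder.value(ex), so node_failures renders as an array of objects. A self-contained illustration of that wrap-each-element pattern (field names hypothetical):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

// Illustrative only: each array element gets its own object wrapper, so the
// rendered JSON is [{...}, {...}] rather than a bare stream of values.
public class FailureArraySketch {
    public static String render() throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.startArray("node_failures");
        for (String node : new String[]{"node-1", "node-2"}) { // hypothetical failures
            builder.startObject();
            builder.field("node_id", node);
            builder.endObject();
        }
        builder.endArray();
        builder.endObject();
        return builder.string();
    }
}
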
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

@@ -48,20 +49,23 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

    private final String description;

    private final Task.Status status;

    private final String parentNode;

    private final long parentId;

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description) {
        this(node, id, type, action, description, null, -1L);
    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
        this(node, id, type, action, description, status, null, -1L);
    }

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, String parentNode, long parentId) {
    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
        this.node = node;
        this.id = id;
        this.type = type;
        this.action = action;
        this.description = description;
        this.status = status;
        this.parentNode = parentNode;
        this.parentId = parentId;
    }

@@ -72,6 +76,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        type = in.readString();
        action = in.readString();
        description = in.readOptionalString();
        if (in.readBoolean()) {
            status = in.readTaskStatus();
        } else {
            status = null;
        }
        parentNode = in.readOptionalString();
        parentId = in.readLong();
    }

@@ -96,6 +105,14 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        return description;
    }

    /**
     * The status of the running task. Only available if TaskInfos were built
     * with the detailed flag.
     */
    public Task.Status getStatus() {
        return status;
    }

    public String getParentNode() {
        return parentNode;
    }

@@ -116,6 +133,12 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        out.writeString(type);
        out.writeString(action);
        out.writeOptionalString(description);
        if (status != null) {
            out.writeBoolean(true);
            out.writeTaskStatus(status);
        } else {
            out.writeBoolean(false);
        }
        out.writeOptionalString(parentNode);
        out.writeLong(parentId);
    }

@@ -127,6 +150,9 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        builder.field("id", id);
        builder.field("type", type);
        builder.field("action", action);
        if (status != null) {
            builder.field("status", status, params);
        }
        if (description != null) {
            builder.field("description", description);
        }

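The status plumbing in TaskInfo uses the boolean-flag-then-payload idiom for optional wire fields: writeBoolean(true) followed by writeTaskStatus, or writeBoolean(false) alone. A generic, self-contained illustration of the same idiom with a plain String payload:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Generic flag-then-payload idiom used above for the optional Task.Status:
// a boolean marker tells the reader whether a payload follows.
final class OptionalWire {
    static void writeOptional(DataOutputStream out, String value) throws IOException {
        if (value != null) {
            out.writeBoolean(true);
            out.writeUTF(value);
        } else {
            out.writeBoolean(false);
        }
    }

    static String readOptional(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}
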
@@ -30,17 +30,17 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Collection;
import java.util.List;

/**
 *
 */
public class TransportListTasksAction extends TransportTasksAction<ListTasksRequest, ListTasksResponse, TaskInfo> {
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {

@@ -22,14 +22,9 @@ package org.elasticsearch.action.admin.cluster.settings;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.cluster.ClusterState.builder;

/**

@@ -57,11 +52,11 @@ final class SettingsUpdater {
        boolean changed = false;
        Settings.Builder transientSettings = Settings.settingsBuilder();
        transientSettings.put(currentState.metaData().transientSettings());
        changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
        changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");

        Settings.Builder persistentSettings = Settings.settingsBuilder();
        persistentSettings.put(currentState.metaData().persistentSettings());
        changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
        changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");

        if (!changed) {
            return currentState;

@@ -86,42 +81,5 @@ final class SettingsUpdater {
        return build;
    }

    private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
        boolean changed = false;
        final Set<String> toRemove = new HashSet<>();
        Settings.Builder settingsBuilder = Settings.settingsBuilder();
        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
            if (entry.getValue() == null) {
                toRemove.add(entry.getKey());
            } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
                settingsBuilder.put(entry.getKey(), entry.getValue());
                updates.put(entry.getKey(), entry.getValue());
                changed = true;
            } else {
                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
            }

        }
        changed |= applyDeletes(toRemove, target);
        target.put(settingsBuilder.build());
        return changed;
    }

    private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
        boolean changed = false;
        for (String entry : deletes) {
            Set<String> keysToRemove = new HashSet<>();
            Set<String> keySet = builder.internalMap().keySet();
            for (String key : keySet) {
                if (Regex.simpleMatch(entry, key)) {
                    keysToRemove.add(key);
                }
            }
            for (String key : keysToRemove) {
                builder.remove(key);
                changed = true;
            }
        }
        return changed;
    }
}

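The deleted apply()/applyDeletes() pair shows what moved into ClusterSettings.updateDynamicSettings: a null value requests deletion, and the key is treated as a simple wildcard pattern via Regex.simpleMatch. A toy sketch of that wildcard-delete behaviour (the one-asterisk matcher is a simplified stand-in, not the real Regex.simpleMatch):

import java.util.HashMap;
import java.util.Map;

// Sketch of the wildcard-delete idea the removed applyDeletes() implemented:
// a null value for "logger.*" removes every key matching that simple pattern.
final class WildcardDelete {
    static boolean simpleMatch(String pattern, String key) {
        // Minimal stand-in: a single trailing '*' wildcard, otherwise exact match.
        return pattern.endsWith("*") ? key.startsWith(pattern.substring(0, pattern.length() - 1))
                                     : pattern.equals(key);
    }

    static boolean remove(Map<String, String> settings, String pattern) {
        return settings.keySet().removeIf(key -> simpleMatch(pattern, key));
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("logger.action", "DEBUG");
        settings.put("logger.index", "TRACE");
        settings.put("cluster.name", "demo");
        remove(settings, "logger.*");
        System.out.println(settings); // {cluster.name=demo}
    }
}
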
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;

@@ -32,7 +33,7 @@ import java.io.IOException;
 */
public class ClusterSearchShardsGroup implements Streamable, ToXContent {

    private String index;
    private Index index;
    private int shardId;
    ShardRouting[] shards;

@@ -40,7 +41,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {

    }

    public ClusterSearchShardsGroup(String index, int shardId, ShardRouting[] shards) {
    public ClusterSearchShardsGroup(Index index, int shardId, ShardRouting[] shards) {
        this.index = index;
        this.shardId = shardId;
        this.shards = shards;

@@ -53,7 +54,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
    }

    public String getIndex() {
        return index;
        return index.getName();
    }

    public int getShardId() {

@@ -66,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        index = in.readString();
        index = Index.readIndex(in);
        shardId = in.readVInt();
        shards = new ShardRouting[in.readVInt()];
        for (int i = 0; i < shards.length; i++) {

@@ -76,7 +77,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        index.writeTo(out);
        out.writeVInt(shardId);
        out.writeVInt(shards.length);
        for (ShardRouting shardRouting : shards) {

@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -77,7 +78,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
        ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
        int currentGroup = 0;
        for (ShardIterator shardIt : groupShardsIterator) {
            String index = shardIt.shardId().getIndex();
            Index index = shardIt.shardId().getIndex();
            int shardId = shardIt.shardId().getId();
            ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
            int currentShard = 0;

@@ -45,7 +45,7 @@ import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

/**
 * Create snapshot request

@@ -379,14 +379,14 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
                    throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                }
            } else if (name.equals("partial")) {
                partial(nodeBooleanValue(entry.getValue()));
                partial(lenientNodeBooleanValue(entry.getValue()));
            } else if (name.equals("settings")) {
                if (!(entry.getValue() instanceof Map)) {
                    throw new IllegalArgumentException("malformed settings section, should be an inner object");
                }
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("include_global_state")) {
                includeGlobalState = nodeBooleanValue(entry.getValue());
                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
            }
        }
        indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));

@@ -43,7 +43,7 @@ import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

/**
 * Restore snapshot request

@@ -498,16 +498,16 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
                    throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                }
            } else if (name.equals("partial")) {
                partial(nodeBooleanValue(entry.getValue()));
                partial(lenientNodeBooleanValue(entry.getValue()));
            } else if (name.equals("settings")) {
                if (!(entry.getValue() instanceof Map)) {
                    throw new IllegalArgumentException("malformed settings section");
                }
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("include_global_state")) {
                includeGlobalState = nodeBooleanValue(entry.getValue());
                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
            } else if (name.equals("include_aliases")) {
                includeAliases = nodeBooleanValue(entry.getValue());
                includeAliases = lenientNodeBooleanValue(entry.getValue());
            } else if (name.equals("rename_pattern")) {
                if (entry.getValue() instanceof String) {
                    renamePattern((String) entry.getValue());

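Both snapshot request parsers swap nodeBooleanValue for lenientNodeBooleanValue. Assuming lenient parsing follows the usual 2.x convention (booleans pass through, numbers are true unless zero, strings are true unless they spell a false-like token — an assumption, not something this diff states), a stand-in would look like:

// Hypothetical stand-in for XContentMapValues.lenientNodeBooleanValue; the exact
// token set here is an assumption, not taken from this diff.
final class LenientBooleans {
    static boolean lenientBoolean(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        if (node instanceof Number) {
            return ((Number) node).intValue() != 0;
        }
        String text = String.valueOf(node);
        return !(text.equals("false") || text.equals("0") || text.equals("off") || text.equals("no"));
    }
}
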
@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;

@@ -146,8 +145,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
        public Request() {
        }

        public Request(ActionRequest request, String[] nodesIds) {
            super(request, nodesIds);
        public Request(String[] nodesIds) {
            super(nodesIds);
        }

        public Request snapshotIds(SnapshotId[] snapshotIds) {

@@ -180,6 +179,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
            this.failures = failures;
        }

        @Override
        public FailedNodeException[] failures() {
            return failures;
        }

@@ -213,7 +213,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
        }

        NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) {
            super(request, nodeId);
            super(nodeId);
            snapshotIds = request.snapshotIds;
        }

@@ -110,7 +110,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
            snapshotIds[i] = currentSnapshots.get(i).snapshotId();
        }

        TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(request, nodesIds.toArray(new String[nodesIds.size()]))
        TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
            .snapshotIds(snapshotIds).timeout(request.masterNodeTimeout());
        transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener<TransportNodesSnapshotsStatus.NodesSnapshotStatus>() {
            @Override

@@ -66,10 +66,10 @@ public class ClusterStatsIndices implements ToXContent, Streamable {

        for (ClusterStatsNodeResponse r : nodeResponses) {
            for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
                ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndex());
                ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName());
                if (indexShardStats == null) {
                    indexShardStats = new ShardStats();
                    countsPerIndex.put(shardStats.getShardRouting().getIndex(), indexShardStats);
                    countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats);
                }

                indexShardStats.total++;

@@ -106,7 +106,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
                if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                    // only report on fully started shards
                    shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
                        new CommonStats(indexShard, SHARD_STATS_FLAGS), indexShard.commitStats(), indexShard.seqNoStats()));
                        new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats(),
                        indexShard.seqNoStats()));
                }
            }
        }

@@ -133,7 +134,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        }

        ClusterStatsNodeRequest(String nodeId, ClusterStatsRequest request) {
            super(request, nodeId);
            super(nodeId);
            this.request = request;
        }

@@ -57,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<

            @Override
            protected void doRun() throws Exception {
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, Collections.emptyMap());
                BytesReference processedTemplate = (BytesReference) executable.run();
                RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
                response.source(processedTemplate);

@@ -286,24 +286,25 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            return addValidationError("Must specify at least one alias action", validationException);
        }
        for (AliasActions aliasAction : allAliasActions) {
            if (aliasAction.aliases.length == 0) {
            if (CollectionUtils.isEmpty(aliasAction.aliases)) {
                validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                    + "]: aliases may not be empty", validationException);
            }
            for (String alias : aliasAction.aliases) {
                if (!Strings.hasText(alias)) {
                    validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                        + "]: [alias] may not be empty string", validationException);
                    + "]: Property [alias/aliases] is either missing or null", validationException);
            } else {
                for (String alias : aliasAction.aliases) {
                    if (!Strings.hasText(alias)) {
                        validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                            + "]: [alias/aliases] may not be empty string", validationException);
                    }
                }
            }
            if (CollectionUtils.isEmpty(aliasAction.indices)) {
                validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                    + "]: Property [index] was either missing or null", validationException);
                    + "]: Property [index/indices] is either missing or null", validationException);
            } else {
                for (String index : aliasAction.indices) {
                    if (!Strings.hasText(index)) {
                        validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                            + "]: [index] may not be empty string", validationException);
                            + "]: [index/indices] may not be empty string", validationException);
                    }
                }
            }

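The reworked validation reports [alias/aliases] and [index/indices] property names and skips the per-element checks entirely when the array is missing. A hypothetical request that trips the new messages (the addAlias builder method is assumed from the 2.x client API, not shown in this diff):

import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

// Hypothetical: a blank alias now fails with "[alias/aliases] may not be empty
// string", while an action built with no indices reports
// "Property [index/indices] is either missing or null".
public class AliasValidationSketch {
    public static void main(String[] args) {
        IndicesAliasesRequest request = new IndicesAliasesRequest();
        request.addAlias("", "my-index"); // blank alias: rejected by Strings.hasText()
        System.out.println(request.validate()); // non-null: lists the validation errors
    }
}
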
@@ -35,7 +35,7 @@ public class GetAliasesRequest extends MasterNodeReadRequest<GetAliasesRequest>
    private String[] indices = Strings.EMPTY_ARRAY;
    private String[] aliases = Strings.EMPTY_ARRAY;

    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
    private IndicesOptions indicesOptions = IndicesOptions.strictExpand();

    public GetAliasesRequest(String[] aliases) {
        this.aliases = aliases;

@@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -49,17 +48,14 @@ import java.util.List;
public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, TransportBroadcastByNodeAction.EmptyResult> {

    private final IndicesService indicesService;
    private final IndicesRequestCache indicesRequestCache;

    @Inject
    public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                            TransportService transportService, IndicesService indicesService,
                                            IndicesRequestCache indicesQueryCache, ActionFilters actionFilters,
                                            TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
                                            IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
            ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
        this.indicesRequestCache = indicesQueryCache;
    }

    @Override

@@ -81,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc

    @Override
    protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
        IndexService service = indicesService.indexService(shardRouting.getIndex());
        IndexService service = indicesService.indexService(shardRouting.getIndexName());
        if (service != null) {
            IndexShard shard = service.getShardOrNull(shardRouting.id());
            boolean clearedAtLeastOne = false;

@@ -101,7 +97,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
            }
            if (request.requestCache()) {
                clearedAtLeastOne = true;
                indicesRequestCache.clear(shard);
                indicesService.clearRequestCache(shard);
            }
            if (request.recycler()) {
                logger.debug("Clear CacheRecycler on index [{}]", service.index());

@@ -117,7 +113,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
                } else {
                    service.cache().clear("api");
                    service.fieldData().clear();
                    indicesRequestCache.clear(shard);
                    indicesService.clearRequestCache(shard);
                }
            }
        }

@@ -81,14 +81,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
    public CreateIndexRequest() {
    }

    /**
     * Constructs a new request to create an index that was triggered by a different request,
     * provided as an argument so that its headers and context can be copied to the new request.
     */
    public CreateIndexRequest(ActionRequest request) {
        super(request);
    }

    /**
     * Constructs a new request to create an index with the specified name.
     */

@@ -42,17 +42,6 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
    private boolean force = false;
    private boolean waitIfOngoing = false;

    public FlushRequest() {
    }

    /**
     * Copy constructor that creates a new flush request that is a copy of the one provided as an argument.
     * The new request will inherit the headers and context from the original request that caused it.
     */
    public FlushRequest(ActionRequest originalRequest) {
        super(originalRequest);
    }

    /**
     * Constructs a new flush request against one or more indices. If nothing is provided, all indices will
     * be flushed.

@@ -31,7 +31,7 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
    private FlushRequest request = new FlushRequest();

    public ShardFlushRequest(FlushRequest request, ShardId shardId) {
        super(request, shardId);
        super(shardId);
        this.request = request;
    }

@@ -36,17 +36,6 @@ import java.util.Arrays;
 */
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {

    public SyncedFlushRequest() {
    }

    /**
     * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
     * The new request will inherit the headers and context from the original request that caused it.
     */
    public SyncedFlushRequest(ActionRequest originalRequest) {
        super(originalRequest);
    }

    /**
     * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
     * be sync flushed.

@@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
    }

    @Override
    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
        indexShard.flush(shardRequest.getRequest());
        logger.trace("{} flush request executed on primary", indexShard.shardId());

@@ -42,7 +42,6 @@ public class GetFieldMappingsIndexRequest extends SingleShardRequest<GetFieldMap
    }

    GetFieldMappingsIndexRequest(GetFieldMappingsRequest other, String index, boolean probablySingleFieldRequest) {
        super(other);
        this.probablySingleFieldRequest = probablySingleFieldRequest;
        this.includeDefaults = other.includeDefaults();
        this.types = other.types();

@@ -102,7 +102,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
                .filter(type -> Regex.simpleMatch(request.types(), type))
                .collect(Collectors.toCollection(ArrayList::new));
            if (typeIntersection.isEmpty()) {
                throw new TypeMissingException(shardId.index(), request.types());
                throw new TypeMissingException(shardId.getIndex(), request.types());
            }
        }

@@ -115,7 +115,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
            }
        }

        return new GetFieldMappingsResponse(singletonMap(shardId.getIndex(), typeMappings.immutableMap()));
        return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
    }

    @Override

@@ -75,7 +75,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
            if (recoveryState == null) {
                continue;
            }
            String indexName = recoveryState.getShardId().getIndex();
            String indexName = recoveryState.getShardId().getIndexName();
            if (!shardResponses.containsKey(indexName)) {
                shardResponses.put(indexName, new ArrayList<>());
            }

@@ -33,17 +33,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 */
public class RefreshRequest extends BroadcastRequest<RefreshRequest> {

    public RefreshRequest() {
    }

    /**
     * Copy constructor that creates a new refresh request that is a copy of the one provided as an argument.
     * The new request will inherit the headers and context from the original request that caused it.
     */
    public RefreshRequest(ActionRequest originalRequest) {
        super(originalRequest);
    }

    public RefreshRequest(String... indices) {
        super(indices);
    }

@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -37,7 +37,7 @@ import java.util.List;
/**
 * Refresh action.
 */
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, ReplicationRequest, ReplicationResponse> {
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {

    @Inject
    public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,

@@ -53,8 +53,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
    }

    @Override
    protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
        return new ReplicationRequest(request, shardId);
    protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
        return new BasicReplicationRequest(shardId);
    }

    @Override

@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;

@@ -41,7 +41,7 @@ import org.elasticsearch.transport.TransportService;
/**
 *
 */
public class TransportShardRefreshAction extends TransportReplicationAction<ReplicationRequest, ReplicationRequest, ReplicationResponse> {
public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {

    public static final String NAME = RefreshAction.NAME + "[s]";

@@ -51,7 +51,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
                                       MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
            actionFilters, indexNameExpressionResolver, ReplicationRequest::new, ReplicationRequest::new, ThreadPool.Names.REFRESH);
            actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
    }

    @Override

@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
    }

    @Override
    protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(MetaData metaData, ReplicationRequest shardRequest) throws Throwable {
    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
        indexShard.refresh("api");
        logger.trace("{} refresh request executed on primary", indexShard.shardId());

@@ -68,7 +68,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
    }

    @Override
    protected void shardOperationOnReplica(ReplicationRequest request) {
    protected void shardOperationOnReplica(BasicReplicationRequest request) {
        final ShardId shardId = request.shardId();
        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
        indexShard.refresh("api");

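The refresh path swaps the generic ReplicationRequest for BasicReplicationRequest, a payload-free concrete subclass keyed only by shard id. The general shape of that marker-subclass move, with illustrative names:

// Generic shape of the change: a base request type stays abstract/parameterized,
// and a trivial concrete subclass exists solely for operations (like refresh)
// that need no payload beyond their target. All names here are illustrative.
abstract class ShardScopedRequest<T extends ShardScopedRequest<T>> {
    final String shardId;

    ShardScopedRequest(String shardId) {
        this.shardId = shardId;
    }
}

// No extra fields: the type itself is the message.
final class BasicShardRequest extends ShardScopedRequest<BasicShardRequest> {
    BasicShardRequest(String shardId) {
        super(shardId);
    }
}
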
@@ -62,17 +62,17 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont

        Set<String> indices = new HashSet<>();
        for (ShardSegments shard : shards) {
            indices.add(shard.getShardRouting().getIndex());
            indices.add(shard.getShardRouting().getIndexName());
        }

        for (String index : indices) {
        for (String indexName : indices) {
            List<ShardSegments> shards = new ArrayList<>();
            for (ShardSegments shard : this.shards) {
                if (shard.getShardRouting().index().equals(index)) {
                if (shard.getShardRouting().getIndexName().equals(indexName)) {
                    shards.add(shard);
                }
            }
            indicesSegments.put(index, new IndexSegments(index, shards.toArray(new ShardSegments[shards.size()])));
            indicesSegments.put(indexName, new IndexSegments(indexName, shards.toArray(new ShardSegments[shards.size()])));
        }
        this.indicesSegments = indicesSegments;
        return indicesSegments;

@@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi

    @Override
    protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex());
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
        IndexShard indexShard = indexService.getShard(shardRouting.id());
        return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
    }

@@ -80,7 +80,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
                continue;
            }

            Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.getSettings());
            Settings settings = settingsFilter.filter(indexMetaData.getSettings());
            if (request.humanReadable()) {
                settings = IndexMetaData.addHumanReadableSettings(settings);
            }

@@ -62,7 +62,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
        if (globalBlock != null) {
            return globalBlock;
        }
        if (request.settings().getAsMap().size() == 1 && (request.settings().get(IndexMetaData.SETTING_BLOCKS_METADATA) != null || request.settings().get(IndexMetaData.SETTING_READ_ONLY) != null )) {
        if (request.settings().getAsMap().size() == 1 && (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings()))) {
            return null;
        }
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));

@@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.shard.ShardStateMetaData;

import java.io.IOException;
import java.util.ArrayList;

@@ -55,7 +56,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
     */
    public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
        private DiscoveryNode node;
        private long version;
        private long legacyVersion;
        private String allocationId;
        private Throwable storeException;
        private AllocationStatus allocationStatus;

@@ -116,9 +117,9 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private StoreStatus() {
        }

        public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
        public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
            this.node = node;
            this.version = version;
            this.legacyVersion = legacyVersion;
            this.allocationId = allocationId;
            this.allocationStatus = allocationStatus;
            this.storeException = storeException;

@@ -132,10 +133,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        }

        /**
         * Version of the store
         * Version of the store for pre-3.0 shards that have not yet been active
         */
        public long getVersion() {
            return version;
        public long getLegacyVersion() {
            return legacyVersion;
        }

        /**

@@ -173,7 +174,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public void readFrom(StreamInput in) throws IOException {
            node = DiscoveryNode.readNode(in);
            version = in.readLong();
            legacyVersion = in.readLong();
            allocationId = in.readOptionalString();
            allocationStatus = AllocationStatus.readFrom(in);
            if (in.readBoolean()) {

@@ -184,7 +185,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            node.writeTo(out);
            out.writeLong(version);
            out.writeLong(legacyVersion);
            out.writeOptionalString(allocationId);
            allocationStatus.writeTo(out);
            if (storeException != null) {

@@ -198,8 +199,12 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            node.toXContent(builder, params);
            builder.field(Fields.VERSION, version);
            builder.field(Fields.ALLOCATION_ID, allocationId);
            if (legacyVersion != ShardStateMetaData.NO_VERSION) {
                builder.field(Fields.LEGACY_VERSION, legacyVersion);
            }
            if (allocationId != null) {
                builder.field(Fields.ALLOCATION_ID, allocationId);
            }
            builder.field(Fields.ALLOCATED, allocationStatus.value());
            if (storeException != null) {
                builder.startObject(Fields.STORE_EXCEPTION);

@@ -215,12 +220,23 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
                return 1;
            } else if (other.storeException != null && storeException == null) {
                return -1;
            } else {
                int compare = Long.compare(other.version, version);
            }
            if (allocationId != null && other.allocationId == null) {
                return -1;
            } else if (allocationId == null && other.allocationId != null) {
                return 1;
            } else if (allocationId == null && other.allocationId == null) {
                int compare = Long.compare(other.legacyVersion, legacyVersion);
                if (compare == 0) {
                    return Integer.compare(allocationStatus.id, other.allocationStatus.id);
                }
                return compare;
            } else {
                int compare = Integer.compare(allocationStatus.id, other.allocationStatus.id);
                if (compare == 0) {
                    return allocationId.compareTo(other.allocationId);
                }
                return compare;
            }
        }
    }

@ -390,7 +406,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
|
|||
static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
|
||||
static final XContentBuilderString STORES = new XContentBuilderString("stores");
|
||||
// StoreStatus fields
|
||||
static final XContentBuilderString VERSION = new XContentBuilderString("version");
|
||||
static final XContentBuilderString LEGACY_VERSION = new XContentBuilderString("legacy_version");
|
||||
static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
|
||||
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
|
||||
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
|
||||
|
|
|
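The rewritten compareTo above prefers store copies that carry an allocation id over legacy copies that only have an on-disk version, keeps broken stores last, and breaks ties on allocation status. A standalone sketch of the same ordering, assuming a trimmed-down StoreStatus with only the fields the comparison reads (the status ids 0/1/2 for primary/replica/unused are an assumption inferred from the sort direction, not taken from the source):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Trimmed-down stand-in for IndicesShardStoresResponse.StoreStatus.
    final class StoreStatusSketch implements Comparable<StoreStatusSketch> {
        final String allocationId;     // null for pre-3.0 ("legacy") copies
        final long legacyVersion;      // only meaningful when allocationId == null
        final int allocationStatusId;  // assumed: 0 = primary, 1 = replica, 2 = unused
        final Throwable storeException;

        StoreStatusSketch(String allocationId, long legacyVersion, int allocationStatusId, Throwable storeException) {
            this.allocationId = allocationId;
            this.legacyVersion = legacyVersion;
            this.allocationStatusId = allocationStatusId;
            this.storeException = storeException;
        }

        @Override
        public int compareTo(StoreStatusSketch other) {
            // Broken stores sort last.
            if (storeException != null && other.storeException == null) {
                return 1;
            } else if (other.storeException != null && storeException == null) {
                return -1;
            }
            // Copies identified by allocation id beat legacy-version-only copies.
            if (allocationId != null && other.allocationId == null) {
                return -1;
            } else if (allocationId == null && other.allocationId != null) {
                return 1;
            } else if (allocationId == null && other.allocationId == null) {
                // Two legacy copies: higher on-disk version wins, then allocation status.
                int compare = Long.compare(other.legacyVersion, legacyVersion);
                return compare == 0 ? Integer.compare(allocationStatusId, other.allocationStatusId) : compare;
            } else {
                // Two allocation-id copies: allocation status first, then id for a stable order.
                int compare = Integer.compare(allocationStatusId, other.allocationStatusId);
                return compare == 0 ? allocationId.compareTo(other.allocationId) : compare;
            }
        }

        public static void main(String[] args) {
            List<StoreStatusSketch> statuses = new ArrayList<>();
            statuses.add(new StoreStatusSketch(null, 7, 1, null));
            statuses.add(new StoreStatusSketch("alloc-b", -1, 1, null));
            statuses.add(new StoreStatusSketch("alloc-a", -1, 0, null));
            Collections.sort(statuses);
            // Sorted order: alloc-a (primary), alloc-b (replica), then the legacy copy.
        }
    }
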
@@ -166,7 +166,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
             ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
             java.util.List<IndicesShardStoresResponse.Failure> failureBuilder = new ArrayList<>();
             for (Response fetchResponse : fetchResponses) {
-                ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex());
+                ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName());
                 final ImmutableOpenIntMap.Builder<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexShardsBuilder;
                 if (indexStoreStatuses == null) {
                     indexShardsBuilder = ImmutableOpenIntMap.builder();
@@ -179,15 +179,15 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
                 }
                 for (NodeGatewayStartedShards response : fetchResponse.responses) {
                     if (shardExistsInNode(response)) {
-                        IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
-                        storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
+                        IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
+                        storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.legacyVersion(), response.allocationId(), allocationStatus, response.storeException()));
                     }
                 }
                 CollectionUtil.timSort(storeStatuses);
                 indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses);
-                indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndex(), indexShardsBuilder.build());
+                indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), indexShardsBuilder.build());
                 for (FailedNodeException failure : fetchResponse.failures) {
-                    failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause()));
+                    failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), failure.getCause()));
                 }
             }
             listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
@@ -196,7 +196,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
         private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
             for (ShardRouting shardRouting : routingNodes.node(node.id())) {
                 ShardId shardId = shardRouting.shardId();
-                if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
+                if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
                     if (shardRouting.primary()) {
                         return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
                     } else if (shardRouting.assignedToNode()) {
@@ -213,7 +213,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
         * A shard exists/existed in a node only if shard state file exists in the node
         */
        private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
-            return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
+            return response.storeException() != null || response.legacyVersion() != -1 || response.allocationId() != null;
        }

        @Override

@@ -44,6 +44,7 @@ import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.index.suggest.stats.SuggestStats;
 import org.elasticsearch.index.translog.TranslogStats;
 import org.elasticsearch.index.warmer.WarmerStats;
+import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.search.suggest.completion.CompletionStats;

 import java.io.IOException;
@@ -122,7 +123,8 @@ public class CommonStats implements Streamable, ToXContent {
     }


-    public CommonStats(IndexShard indexShard, CommonStatsFlags flags) {
+    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

         CommonStatsFlags.Flag[] setFlags = flags.getFlags();

         for (CommonStatsFlags.Flag flag : setFlags) {
@@ -155,7 +157,7 @@ public class CommonStats implements Streamable, ToXContent {
                 warmer = indexShard.warmerStats();
                 break;
             case QueryCache:
-                queryCache = indexShard.queryCacheStats();
+                queryCache = indicesQueryCache.getStats(indexShard.shardId());
                 break;
             case FieldData:
                 fieldData = indexShard.fieldDataStats(flags.fieldDataFields());

@@ -89,17 +89,17 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten

         Set<String> indices = new HashSet<>();
         for (ShardStats shard : shards) {
-            indices.add(shard.getShardRouting().getIndex());
+            indices.add(shard.getShardRouting().getIndexName());
         }

-        for (String index : indices) {
+        for (String indexName : indices) {
             List<ShardStats> shards = new ArrayList<>();
             for (ShardStats shard : this.shards) {
-                if (shard.getShardRouting().index().equals(index)) {
+                if (shard.getShardRouting().getIndexName().equals(indexName)) {
                     shards.add(shard);
                 }
             }
-            indicesStats.put(index, new IndexStats(index, shards.toArray(new ShardStats[shards.size()])));
+            indicesStats.put(indexName, new IndexStats(indexName, shards.toArray(new ShardStats[shards.size()])));
         }
         this.indicesStats = indicesStats;
         return indicesStats;

@@ -163,6 +163,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
         }

         return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
-                new CommonStats(indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
+                new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
     }
 }

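The CommonStats change threads the node-level IndicesQueryCache in from IndicesService, because query-cache statistics are now tracked by one shared, node-wide component and looked up per shard rather than read off the shard itself. A rough sketch of that shape with hypothetical types (none of these names are from the Elasticsearch codebase):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical stand-ins illustrating why the caller now passes the
    // node-level cache in: stats live in one shared component, keyed by shard.
    final class QueryCacheStatsSketch {
        final long hitCount;
        final long missCount;

        QueryCacheStatsSketch(long hitCount, long missCount) {
            this.hitCount = hitCount;
            this.missCount = missCount;
        }
    }

    final class NodeQueryCacheSketch {
        private final Map<Integer, QueryCacheStatsSketch> statsByShard = new ConcurrentHashMap<>();

        void record(int shardId, long hits, long misses) {
            statsByShard.put(shardId, new QueryCacheStatsSketch(hits, misses));
        }

        // The per-shard view is computed by the shared cache, not by the shard.
        QueryCacheStatsSketch getStats(int shardId) {
            return statsByShard.getOrDefault(shardId, new QueryCacheStatsSketch(0, 0));
        }
    }
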
@@ -25,9 +25,11 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -38,13 +40,15 @@ import org.elasticsearch.transport.TransportService;
 public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<PutIndexTemplateRequest, PutIndexTemplateResponse> {

     private final MetaDataIndexTemplateService indexTemplateService;
+    private final IndexScopedSettings indexScopedSettings;

     @Inject
     public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                            ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService,
-                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexScopedSettings indexScopedSettings) {
         super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutIndexTemplateRequest::new);
         this.indexTemplateService = indexTemplateService;
+        this.indexScopedSettings = indexScopedSettings;
     }

     @Override
@@ -69,11 +73,13 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
         if (cause.length() == 0) {
             cause = "api";
         }

+        final Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
+        templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+        indexScopedSettings.validate(templateSettingsBuilder);
         indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())
                 .template(request.template())
                 .order(request.order())
-                .settings(request.settings())
+                .settings(templateSettingsBuilder.build())
                 .mappings(request.mappings())
                 .aliases(request.aliases())
                 .customs(request.customs())

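Template settings are now normalized onto the index. prefix and validated against the index-scoped settings registry before the template is stored, so an invalid setting fails at put-template time instead of at index-creation time. A minimal sketch of the prefix-normalization step, assuming a plain map in place of Settings.Builder:

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class PrefixNormalizerSketch {
        // Rewrites keys like "number_of_shards" to "index.number_of_shards",
        // leaving keys that already carry the prefix untouched.
        static Map<String, String> normalizePrefix(Map<String, String> settings, String prefix) {
            Map<String, String> normalized = new LinkedHashMap<>();
            for (Map.Entry<String, String> e : settings.entrySet()) {
                String key = e.getKey().startsWith(prefix) ? e.getKey() : prefix + e.getKey();
                normalized.put(key, e.getValue());
            }
            return normalized;
        }

        public static void main(String[] args) {
            Map<String, String> raw = new HashMap<>();
            raw.put("number_of_shards", "3");
            raw.put("index.refresh_interval", "5s");
            // Both keys end up under the "index." namespace.
            System.out.println(normalizePrefix(raw, "index."));
        }
    }
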
@@ -59,14 +59,14 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
             indices.add(shard.getIndex());
         }

-        for (String index : indices) {
+        for (String indexName : indices) {
             List<ShardUpgradeStatus> shards = new ArrayList<>();
             for (ShardUpgradeStatus shard : this.shards) {
-                if (shard.getShardRouting().index().equals(index)) {
+                if (shard.getShardRouting().getIndexName().equals(indexName)) {
                     shards.add(shard);
                 }
             }
-            indicesUpgradeStats.put(index, new IndexUpgradeStatus(index, shards.toArray(new ShardUpgradeStatus[shards.size()])));
+            indicesUpgradeStats.put(indexName, new IndexUpgradeStatus(indexName, shards.toArray(new ShardUpgradeStatus[shards.size()])));
         }
         this.indicesUpgradeStatus = indicesUpgradeStats;
         return indicesUpgradeStats;

@@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -75,7 +76,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
         Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
         for (ShardUpgradeResult result : shardUpgradeResults) {
             successfulShards++;
-            String index = result.getShardId().getIndex();
+            String index = result.getShardId().getIndex().getName();
             if (result.primary()) {
                 Integer count = successfulPrimaryShards.get(index);
                 successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
@@ -179,7 +180,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
     }

     @Override
-    protected void doExecute(UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
+    protected void doExecute(Task task, UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
         ActionListener<UpgradeResponse> settingsUpdateListener = new ActionListener<UpgradeResponse>() {
             @Override
             public void onResponse(UpgradeResponse upgradeResponse) {
@@ -199,7 +200,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
                 listener.onFailure(e);
             }
         };
-        super.doExecute(request, settingsUpdateListener);
+        super.doExecute(task, request, settingsUpdateListener);
     }

     private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {

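Several transport actions in this diff gain a Task parameter on doExecute and thread it through wrapped listeners, helper methods, and super.doExecute calls, so follow-up work still runs under the task that tracks the original request. A pared-down sketch of the pattern with hypothetical request and listener types:

    // Hypothetical, simplified version of the Task-threading pattern used by
    // the transport actions in this diff.
    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Throwable e);
    }

    final class Task {
        final long id;
        Task(long id) {
            this.id = id;
        }
    }

    abstract class BaseTransportAction<Req, Resp> {
        // The task created for the incoming request is handed to doExecute...
        final void execute(Task task, Req request, ActionListener<Resp> listener) {
            doExecute(task, request, listener);
        }

        protected abstract void doExecute(Task task, Req request, ActionListener<Resp> listener);
    }

    final class UpgradeLikeAction extends BaseTransportAction<String, String> {
        @Override
        protected void doExecute(Task task, String request, ActionListener<String> listener) {
            ActionListener<String> wrapped = new ActionListener<String>() {
                @Override
                public void onResponse(String response) {
                    // ...and any follow-up step still runs under the same task.
                    listener.onResponse(response + " (task " + task.id + ")");
                }

                @Override
                public void onFailure(Throwable e) {
                    listener.onFailure(e);
                }
            };
            innerExecute(task, request, wrapped);
        }

        private void innerExecute(Task task, String request, ActionListener<String> listener) {
            listener.onResponse("upgraded " + request);
        }
    }
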
@@ -52,6 +52,7 @@ import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.internal.DefaultSearchContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchLocalRequest;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -89,14 +90,14 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     }

     @Override
-    protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
+    protected void doExecute(Task task, ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
         request.nowInMillis = System.currentTimeMillis();
-        super.doExecute(request, listener);
+        super.doExecute(task, request, listener);
     }

     @Override
     protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) {
-        String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index(), request.indices());
+        String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.getIndexName(), request.indices());
         return new ShardValidateQueryRequest(shard.shardId(), filteringAliases, request);
     }

@@ -163,7 +164,8 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexShard indexShard = indexService.getShard(request.shardId().id());
-        final QueryShardContext queryShardContext = indexShard.getQueryShardContext();
+        final QueryShardContext queryShardContext = indexService.newQueryShardContext();
         queryShardContext.setTypes(request.types());

         boolean valid;
         String explanation = null;
@@ -182,11 +184,10 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
             searchContext.preProcess();

             valid = true;
-            if (request.explain()) {
-                explanation = searchContext.parsedQuery().query().toString();
-            }
+            if (request.rewrite()) {
+                explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
+            } else if (request.explain()) {
+                explanation = searchContext.filteredQuery().query().toString();
+            }
         } catch (QueryShardException|ParsingException e) {
             valid = false;

@@ -179,7 +179,7 @@ public class BulkProcessor implements Closeable {


     private final ScheduledThreadPoolExecutor scheduler;
-    private final ScheduledFuture scheduledFuture;
+    private final ScheduledFuture<?> scheduledFuture;

     private final AtomicLong executionIdGen = new AtomicLong();

@@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable {
      * (for example, if no id is provided, one will be generated, or usage of the create flag).
      */
     public BulkProcessor add(IndexRequest request) {
-        return add((ActionRequest) request);
+        return add((ActionRequest<?>) request);
     }

     /**
      * Adds an {@link DeleteRequest} to the list of actions to execute.
      */
     public BulkProcessor add(DeleteRequest request) {
-        return add((ActionRequest) request);
+        return add((ActionRequest<?>) request);
     }

     /**
      * Adds either a delete or an index request.
      */
-    public BulkProcessor add(ActionRequest request) {
+    public BulkProcessor add(ActionRequest<?> request) {
         return add(request, null);
     }

-    public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
+    public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
         internalAdd(request, payload);
         return this;
     }
@@ -282,18 +282,18 @@ public class BulkProcessor implements Closeable {
         }
     }

-    private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
+    private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
         ensureOpen();
         bulkRequest.add(request, payload);
         executeIfNeeded();
     }

     public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
-        return add(data, defaultIndex, defaultType, null);
+        return add(data, defaultIndex, defaultType, null, null);
     }

-    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable Object payload) throws Exception {
-        bulkRequest.add(data, defaultIndex, defaultType, null, null, payload, true);
+    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
+        bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
         executeIfNeeded();
         return this;
     }

@@ -28,6 +28,7 @@ import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -56,7 +57,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite

     private static final int REQUEST_OVERHEAD = 50;

-    final List<ActionRequest> requests = new ArrayList<>();
+    final List<ActionRequest<?>> requests = new ArrayList<>();
     List<Object> payloads = null;

     protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@@ -68,25 +69,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     public BulkRequest() {
     }

-    /**
-     * Creates a bulk request caused by some other request, which is provided as an
-     * argument so that its headers and context can be copied to the new request
-     */
-    public BulkRequest(ActionRequest request) {
-        super(request);
-    }
-
     /**
      * Adds a list of requests to be executed. Either index or delete requests.
      */
-    public BulkRequest add(ActionRequest... requests) {
-        for (ActionRequest request : requests) {
+    public BulkRequest add(ActionRequest<?>... requests) {
+        for (ActionRequest<?> request : requests) {
             add(request, null);
         }
         return this;
     }

-    public BulkRequest add(ActionRequest request) {
+    public BulkRequest add(ActionRequest<?> request) {
         return add(request, null);
     }

@@ -96,7 +89,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     * @param payload Optional payload
     * @return the current bulk request
     */
-    public BulkRequest add(ActionRequest request, @Nullable Object payload) {
+    public BulkRequest add(ActionRequest<?> request, @Nullable Object payload) {
        if (request instanceof IndexRequest) {
            add((IndexRequest) request, payload);
        } else if (request instanceof DeleteRequest) {
@@ -112,8 +105,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
    /**
     * Adds a list of requests to be executed. Either index or delete requests.
     */
-    public BulkRequest add(Iterable<ActionRequest> requests) {
-        for (ActionRequest request : requests) {
+    public BulkRequest add(Iterable<ActionRequest<?>> requests) {
+        for (ActionRequest<?> request : requests) {
            add(request);
        }
        return this;
@@ -196,15 +189,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
    /**
     * The list of requests in this bulk request.
     */
-    public List<ActionRequest> requests() {
+    public List<ActionRequest<?>> requests() {
        return this.requests;
    }

    @Override
-    @SuppressWarnings("unchecked")
    public List<? extends IndicesRequest> subRequests() {
        List<IndicesRequest> indicesRequests = new ArrayList<>();
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
            assert request instanceof IndicesRequest;
            indicesRequests.add((IndicesRequest) request);
        }
@@ -254,17 +246,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     * Adds a framed data in binary format
     */
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
-        return add(data, defaultIndex, defaultType, null, null, null, true);
+        return add(data, defaultIndex, defaultType, null, null, null, null, true);
    }

    /**
     * Adds a framed data in binary format
     */
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
-        return add(data, defaultIndex, defaultType, null, null, null, allowExplicitIndex);
+        return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
    }

-    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
+    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
        XContent xContent = XContentFactory.xContent(data);
        int line = 0;
        int from = 0;
@@ -305,6 +297,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                long version = Versions.MATCH_ANY;
                VersionType versionType = VersionType.INTERNAL;
                int retryOnConflict = 0;
+                String pipeline = defaultPipeline;

                // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
                // or START_OBJECT which will have another set of parameters
@@ -345,6 +338,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                            versionType = VersionType.fromString(parser.text());
                        } else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
                            retryOnConflict = parser.intValue();
+                        } else if ("pipeline".equals(currentFieldName)) {
+                            pipeline = parser.text();
                        } else if ("fields".equals(currentFieldName)) {
                            throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
                        } else {
@@ -381,15 +376,15 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                if ("index".equals(action)) {
                    if (opType == null) {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                                .source(data.slice(from, nextMarker - from)), payload);
+                                .setPipeline(pipeline).source(data.slice(from, nextMarker - from)), payload);
                    } else {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                                .create("create".equals(opType))
+                                .create("create".equals(opType)).setPipeline(pipeline)
                                .source(data.slice(from, nextMarker - from)), payload);
                    }
                } else if ("create".equals(action)) {
                    internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                            .create(true)
+                            .create(true).setPipeline(pipeline)
                            .source(data.slice(from, nextMarker - from)), payload);
                } else if ("update".equals(action)) {
                    UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict)
@@ -480,13 +475,29 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
        return -1;
    }

+    /**
+     * @return Whether this bulk request contains index request with an ingest pipeline enabled.
+     */
+    public boolean hasIndexRequestsWithPipelines() {
+        for (ActionRequest actionRequest : requests) {
+            if (actionRequest instanceof IndexRequest) {
+                IndexRequest indexRequest = (IndexRequest) actionRequest;
+                if (Strings.hasText(indexRequest.getPipeline())) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (requests.isEmpty()) {
            validationException = addValidationError("no requests added", validationException);
        }
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
            // We first check if refresh has been set
            if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
                    (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
@@ -535,7 +546,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
        super.writeTo(out);
        out.writeByte(consistencyLevel.id());
        out.writeVInt(requests.size());
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
            if (request instanceof IndexRequest) {
                out.writeByte((byte) 0);
            } else if (request instanceof DeleteRequest) {

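Taken together, the BulkRequest changes let a bulk body name an ingest pipeline per action (via the new "pipeline" metadata key) or fall back to a request-wide default (the new defaultPipeline argument). A usage sketch against the signatures added above; the index, type, and pipeline names are placeholders:

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.common.bytes.BytesArray;

    public class BulkPipelineExample {
        public static void main(String[] args) throws Exception {
            // "my-pipeline", "default-pipeline", and the index/type names are placeholders.
            String bulkBody =
                    "{\"index\":{\"_index\":\"logs\",\"_type\":\"event\",\"_id\":\"1\",\"pipeline\":\"my-pipeline\"}}\n"
                    + "{\"message\":\"hello\"}\n"
                    + "{\"index\":{\"_index\":\"logs\",\"_type\":\"event\",\"_id\":\"2\"}}\n"
                    + "{\"message\":\"world\"}\n";

            BulkRequest bulkRequest = new BulkRequest();
            // Argument order mirrors the new signature in this diff:
            // data, defaultIndex, defaultType, defaultRouting, defaultFields,
            // defaultPipeline, payload, allowExplicitIndex.
            bulkRequest.add(new BytesArray(bulkBody), "logs", "event", null, null, "default-pipeline", null, true);

            // The second action line names no pipeline, so it inherits "default-pipeline";
            // either way, hasIndexRequestsWithPipelines() is true for this request.
            System.out.println(bulkRequest.hasIndexRequestsWithPipelines());
        }
    }
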
@@ -41,7 +41,7 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
     }

     BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) {
-        super(bulkRequest, shardId);
+        super(shardId);
         this.items = items;
         this.refresh = refresh;
     }

@@ -114,7 +114,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
             final String index = entry.getKey();
             if (autoCreateIndex.shouldAutoCreate(index, state)) {
-                CreateIndexRequest createIndexRequest = new CreateIndexRequest(bulkRequest);
+                CreateIndexRequest createIndexRequest = new CreateIndexRequest();
                 createIndexRequest.index(index);
                 for (String type : entry.getValue()) {
                     createIndexRequest.mapping(type);
@@ -377,7 +377,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         if (unavailableException == null) {
             IndexMetaData indexMetaData = metaData.index(concreteIndex);
             if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
-                unavailableException = new IndexClosedException(new Index(metaData.index(request.index()).getIndex()));
+                unavailableException = new IndexClosedException(metaData.index(request.index()).getIndex());
             }
         }
         if (unavailableException != null) {

@@ -92,7 +92,7 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
     * The new request will inherit though headers and context from the original request that caused it.
     */
    public DeleteRequest(DeleteRequest request, ActionRequest originalRequest) {
-        super(request, originalRequest);
+        super(request);
        this.type = request.type();
        this.id = request.id();
        this.routing = request.routing();
@@ -102,14 +102,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        this.versionType = request.versionType();
    }

-    /**
-     * Creates a delete request caused by some other request, which is provided as an
-     * argument so that its headers and context can be copied to the new request
-     */
-    public DeleteRequest(ActionRequest request) {
-        super(request);
-    }
-
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = super.validate();

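A recurring cleanup in this diff removes the "caused by" constructors that copied headers and context from whichever request triggered a new one; requests are now built standalone, and anything that has to follow the request (such as the Task in the transport-action changes) is passed explicitly. A minimal sketch of the before/after shape, with hypothetical request types:

    // Hypothetical shapes, before and after the cleanup in this diff.

    // Before: every request type needed a "caused by" constructor so headers
    // and context would propagate implicitly from the triggering request.
    class OldStyleRequest {
        final java.util.Map<String, String> headers = new java.util.HashMap<>();

        OldStyleRequest() {
        }

        OldStyleRequest(OldStyleRequest originalRequest) {
            // implicit, easy-to-miss coupling between unrelated requests
            headers.putAll(originalRequest.headers);
        }
    }

    // After: requests are constructed standalone; anything that must travel
    // with the request is handed over as an explicit argument instead.
    class NewStyleRequest {
        NewStyleRequest() {
        }
    }
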
@@ -44,6 +44,7 @@ import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -69,27 +70,27 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     }

     @Override
-    protected void doExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+    protected void doExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
         ClusterState state = clusterService.state();
         if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
-            createIndexAction.execute(new CreateIndexRequest(request).index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
+            createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
                 @Override
                 public void onResponse(CreateIndexResponse result) {
-                    innerExecute(request, listener);
+                    innerExecute(task, request, listener);
                 }

                 @Override
                 public void onFailure(Throwable e) {
                     if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
                         // we have the index, do it
-                        innerExecute(request, listener);
+                        innerExecute(task, request, listener);
                     } else {
                         listener.onFailure(e);
                     }
                 }
             });
         } else {
-            innerExecute(request, listener);
+            innerExecute(task, request, listener);
         }
     }

@@ -114,8 +115,8 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
         request.setShardId(shardId);
     }

-    private void innerExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
-        super.doExecute(request, listener);
+    private void innerExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
+        super.doExecute(task, request, listener);
     }

     @Override

@@ -108,7 +108,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
         Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
         Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
         if (!result.exists()) {
-            return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), false);
+            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
         }

         SearchContext context = new DefaultSearchContext(
@@ -121,7 +121,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
         SearchContext.setCurrent(context);

         try {
-            context.parsedQuery(indexShard.getQueryShardContext().toQuery(request.query()));
+            context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
             context.preProcess();
             int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
             Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
@@ -134,9 +134,9 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
                 // because we are working in the same searcher in engineGetResult we can be sure that a
                 // doc isn't deleted between the initial get and this call.
                 GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext(), false);
-                return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), true, explanation, getResult);
+                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
             } else {
-                return new ExplainResponse(shardId.getIndex(), request.type(), request.id(), true, explanation);
+                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
             }
         } catch (IOException e) {
             throw new ElasticsearchException("Could not explain", e);

@@ -73,7 +73,7 @@ public class IndexConstraint {
     }

     /**
-     * @return On what property of a field the contraint is going to be applied on (min or max value)
+     * @return On what property of a field the constraint is going to be applied on (min or max value)
     */
    public Property getProperty() {
        return property;

@@ -19,7 +19,6 @@

 package org.elasticsearch.action.get;

-import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.RealtimeRequest;
 import org.elasticsearch.action.ValidateActions;
@@ -72,8 +71,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
     * Copy constructor that creates a new get request that is a copy of the one provided as an argument.
     * The new request will inherit though headers and context from the original request that caused it.
     */
-    public GetRequest(GetRequest getRequest, ActionRequest originalRequest) {
-        super(originalRequest);
+    public GetRequest(GetRequest getRequest) {
        this.index = getRequest.index;
        this.type = getRequest.type;
        this.id = getRequest.id;
@@ -98,14 +96,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        this.type = "_all";
    }

-    /**
-     * Constructs a new get request starting from the provided request, meaning that it will
-     * inherit its headers and context, and against the specified index.
-     */
-    public GetRequest(ActionRequest request, String index) {
-        super(request, index);
-    }
-
    /**
     * Constructs a new get request against the specified index with the type and id.
     *

@@ -266,18 +266,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I

     List<Item> items = new ArrayList<>();

-    public MultiGetRequest() {
-
-    }
-
-    /**
-     * Creates a multi get request caused by some other request, which is provided as an
-     * argument so that its headers and context can be copied to the new request
-     */
-    public MultiGetRequest(ActionRequest request) {
-        super(request);
-    }
-
     public List<Item> getItems() {
         return this.items;
     }

@@ -45,7 +45,7 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
     }

     MultiGetShardRequest(MultiGetRequest multiGetRequest, String index, int shardId) {
-        super(multiGetRequest, index);
+        super(index);
         this.shardId = shardId;
         locations = new IntArrayList();
         items = new ArrayList<>();

@@ -79,7 +79,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
                     .getShards(clusterState, concreteSingleIndex, item.type(), item.id(), item.routing(), null).shardId();
             MultiGetShardRequest shardRequest = shardRequests.get(shardId);
             if (shardRequest == null) {
-                shardRequest = new MultiGetShardRequest(request, shardId.index().name(), shardId.id());
+                shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.id());
                 shardRequests.put(shardId, shardRequest);
             }
             shardRequest.add(i, item);

@@ -21,7 +21,6 @@ package org.elasticsearch.action.index;

 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocumentRequest;
 import org.elasticsearch.action.RoutingMissingException;
@@ -155,23 +154,17 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do

     private XContentType contentType = Requests.INDEX_CONTENT_TYPE;

-    public IndexRequest() {
-    }
+    private String pipeline;

-    /**
-     * Creates an index request caused by some other request, which is provided as an
-     * argument so that its headers and context can be copied to the new request
-     */
-    public IndexRequest(ActionRequest request) {
-        super(request);
+    public IndexRequest() {
     }

     /**
      * Copy constructor that creates a new index request that is a copy of the one provided as an argument.
      * The new request will inherit though headers and context from the original request that caused it.
      */
-    public IndexRequest(IndexRequest indexRequest, ActionRequest originalRequest) {
-        super(indexRequest, originalRequest);
+    public IndexRequest(IndexRequest indexRequest) {
+        super(indexRequest);
         this.type = indexRequest.type;
         this.id = indexRequest.id;
         this.routing = indexRequest.routing;
@@ -364,7 +357,22 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     }

     /**
-     * The source of the document to index, recopied to a new array if it is unsage.
+     * Sets the ingest pipeline to be executed before indexing the document
+     */
+    public IndexRequest setPipeline(String pipeline) {
+        this.pipeline = pipeline;
+        return this;
+    }
+
+    /**
+     * Returns the ingest pipeline to be executed before indexing the document
+     */
+    public String getPipeline() {
+        return this.pipeline;
+    }
+
+    /**
+     * The source of the document to index, recopied to a new array if it is unsafe.
     */
    public BytesReference source() {
        return source;
@@ -658,6 +666,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        refresh = in.readBoolean();
        version = in.readLong();
        versionType = VersionType.fromValue(in.readByte());
+        pipeline = in.readOptionalString();
    }

    @Override
@@ -679,6 +688,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        out.writeBoolean(refresh);
        out.writeLong(version);
        out.writeByte(versionType.getValue());
+        out.writeOptionalString(pipeline);
    }

    @Override

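The new pipeline field is appended at the matching position in both readFrom and writeTo, using the optional-string variant so a null pipeline costs a single marker byte; the wire format relies on reader and writer staying byte-for-byte symmetric. A minimal sketch of that optional-string convention, using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class OptionalStringSketch {
        // One marker byte says whether a value follows, mirroring writeOptionalString().
        static void writeOptionalString(DataOutputStream out, String value) throws IOException {
            if (value == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeUTF(value);
            }
        }

        // readOptionalString() must consume exactly what the writer produced.
        static String readOptionalString(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeOptionalString(new DataOutputStream(bytes), "my-pipeline");
            String roundTripped = readOptionalString(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(roundTripped); // my-pipeline
        }
    }
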
@@ -278,4 +278,12 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
         request.ttl(ttl);
         return this;
     }
+
+    /**
+     * Sets the ingest pipeline to be executed before indexing the document
+     */
+    public IndexRequestBuilder setPipeline(String pipeline) {
+        request.setPipeline(pipeline);
+        return this;
+    }
 }

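With the builder pass-through in place, a caller can opt a single document into an ingest pipeline fluently. A short usage sketch; the client instance, the index and type names, and the pipeline name are all placeholders:

    import org.elasticsearch.client.Client;

    public final class PipelineIndexExample {
        // "logs", "event", and "my-pipeline" are placeholder names.
        static void indexThroughPipeline(Client client) {
            client.prepareIndex("logs", "event", "1")
                    .setPipeline("my-pipeline")
                    .setSource("{\"message\":\"hello\"}")
                    .get();
        }
    }
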
@@ -48,6 +48,7 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -84,19 +85,19 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     }

     @Override
-    protected void doExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
+    protected void doExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
         // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
         ClusterState state = clusterService.state();
         if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
-            CreateIndexRequest createIndexRequest = new CreateIndexRequest(request);
+            CreateIndexRequest createIndexRequest = new CreateIndexRequest();
             createIndexRequest.index(request.index());
             createIndexRequest.mapping(request.type());
             createIndexRequest.cause("auto(index api)");
             createIndexRequest.masterNodeTimeout(request.timeout());
-            createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {
+            createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {
                 @Override
                 public void onResponse(CreateIndexResponse result) {
-                    innerExecute(request, listener);
+                    innerExecute(task, request, listener);
                 }

                 @Override
@@ -104,7 +105,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
                     if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
                         // we have the index, do it
                         try {
-                            innerExecute(request, listener);
+                            innerExecute(task, request, listener);
                         } catch (Throwable e1) {
                             listener.onFailure(e1);
                         }
@@ -114,7 +115,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
                 }
             });
         } else {
-            innerExecute(request, listener);
+            innerExecute(task, request, listener);
         }
     }

@@ -129,8 +130,8 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         request.setShardId(shardId);
     }

-    private void innerExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
-        super.doExecute(request, listener);
+    private void innerExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
+        super.doExecute(task, request, listener);
     }

     @Override
@@ -139,14 +140,14 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     }

     @Override
-    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
+    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {

         // validate, if routing is required, that we got routing
         IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
         MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
         if (mappingMd != null && mappingMd.routing().required()) {
             if (request.routing() == null) {
-                throw new RoutingMissingException(request.shardId().getIndex(), request.type(), request.id());
+                throw new RoutingMissingException(request.shardId().getIndex().getName(), request.type(), request.id());
             }
         }

@@ -176,7 +177,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     */
    public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
        final ShardId shardId = indexShard.shardId();
-        SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
+        SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndexName()).type(request.type()).id(request.id())
                .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

        final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.seqNo(), request.version(), request.versionType());
@@ -199,12 +200,12 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     * Execute the given {@link IndexRequest} on a primary shard, throwing a
     * {@link RetryOnPrimaryException} if the operation needs to be re-tried.
     */
-    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
+    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
        Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        final ShardId shardId = indexShard.shardId();
        if (update != null) {
-            final String indexName = shardId.getIndex();
+            final String indexName = shardId.getIndexName();
            mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
            operation = prepareIndexOperationOnPrimary(request, indexShard);
            update = operation.parsedDoc().dynamicMappingsUpdate();

@@ -164,7 +164,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
     }

     /**
-     * The source of the document to index, recopied to a new array if it is unsage.
+     * The source of the document to index, recopied to a new array if it is unsafe.
     */
    public BytesReference source() {
        return source;

Some files were not shown because too many files have changed in this diff.