Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-09 14:34:43 +00:00

Commit 330f2919cb: merge from master
CONTRIBUTING.md → .github/CONTRIBUTING.md (vendored, 2 lines changed)
@@ -54,7 +54,7 @@ Once your changes and tests are ready to submit for review:
1. Test your changes

Run the test suite to make sure that nothing is broken. See the
[TESTING](TESTING.asciidoc) file for help running tests.
[TESTING](../TESTING.asciidoc) file for help running tests.

2. Sign the Contributor License Agreement
.github/ISSUE_TEMPLATE.md (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
<!--
GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
-->

<!--
If you are filing a bug report, please remove the below feature
request block and provide responses for all of the below items.
-->

**Elasticsearch version**:

**JVM version**:

**OS version**:

**Description of the problem including expected versus actual behavior**:

**Steps to reproduce**:
1.
2.
3.

**Provide logs (if relevant)**:

<!--
If you are filing a feature request, please remove the above bug
report block and provide responses for all of the below items.
-->

**Describe the feature**:
@@ -1,3 +1,5 @@
import java.nio.file.Files

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -40,6 +42,15 @@ archivesBaseName = 'build-tools'
Properties props = new Properties()
props.load(project.file('version.properties').newDataInputStream())
version = props.getProperty('elasticsearch')
boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true"));
if (snapshot) {
  // we update the version property to reflect if we are building a snapshot or a release build
  // we write this back out below to load it in the Build.java which will be shown in rest main action
  // to indicate this being a snapshot build or a release build.
  version += "-SNAPSHOT"
  props.put("elasticsearch", version);
}


repositories {
  mavenCentral()
@@ -66,9 +77,22 @@ dependencies {
  compile 'org.apache.rat:apache-rat:0.11'
}

File tempPropertiesFile = new File(project.buildDir, "version.properties")
task writeVersionProperties {
  inputs.properties(props)
  doLast {
    OutputStream stream = Files.newOutputStream(tempPropertiesFile.toPath());
    try {
      props.store(stream, "UTF-8");
    } finally {
      stream.close();
    }
  }
}

processResources {
  inputs.file('version.properties')
  from 'version.properties'
  dependsOn writeVersionProperties
  from tempPropertiesFile
}

extraArchive {
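The two hunks above thread the snapshot flag through version.properties: the build appends "-SNAPSHOT" to the elasticsearch property and stores the result in build/version.properties so Build.java can later report it. A minimal stand-alone Java sketch of that read/strip round trip, using only java.util.Properties; the path and property key come from the hunks, everything else is illustrative:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;

public class VersionPropsDemo {
    public static void main(String[] args) throws Exception {
        // Illustrative location; the Gradle task above writes the file under project.buildDir.
        Path file = Paths.get("build", "version.properties");
        Properties props = new Properties();
        try (InputStream in = Files.newInputStream(file)) {
            props.load(in); // mirrors props.load(...) in the hunk
        }
        String version = props.getProperty("elasticsearch"); // e.g. "5.0.0-SNAPSHOT"
        boolean snapshot = version.endsWith("-SNAPSHOT");
        String base = snapshot
                ? version.substring(0, version.length() - "-SNAPSHOT".length())
                : version;
        System.out.println(version + " -> snapshot=" + snapshot + ", base=" + base);
    }
}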
@@ -354,11 +354,17 @@ class BuildPlugin implements Plugin<Project> {
    static void configureJarManifest(Project project) {
        project.tasks.withType(Jar) { Jar jarTask ->
            jarTask.doFirst {
                boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT");
                String version = VersionProperties.elasticsearch;
                if (isSnapshot) {
                    version = version.substring(0, version.length() - 9)
                }
                // this doFirst is added before the info plugin, therefore it will run
                // after the doFirst added by the info plugin, and we can override attributes
                jarTask.manifest.attributes(
                        'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
                        'X-Compile-Elasticsearch-Version': version,
                        'X-Compile-Lucene-Version': VersionProperties.lucene,
                        'X-Compile-Elasticsearch-Snapshot': isSnapshot,
                        'Build-Date': ZonedDateTime.now(ZoneOffset.UTC),
                        'Build-Java-Version': project.javaVersion)
                if (jarTask.manifest.attributes.containsKey('Change') == false) {
@@ -394,7 +400,7 @@ class BuildPlugin implements Plugin<Project> {
            // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
            systemProperty 'java.io.tmpdir', './temp'
            systemProperty 'java.awt.headless', 'true'
            systemProperty 'tests.maven', 'true' // TODO: rename this once we've switched to gradle!
            systemProperty 'tests.gradle', 'true'
            systemProperty 'tests.artifact', project.name
            systemProperty 'tests.task', path
            systemProperty 'tests.security.manager', 'true'
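The `version.substring(0, version.length() - 9)` call above (and the same expression in later hunks) trims exactly the "-SNAPSHOT" suffix; the literal 9 is simply "-SNAPSHOT".length(). A small hedged sketch of that trimming, with made-up class and method names:

public class SnapshotSuffixDemo {
    static String stripSnapshot(String version) {
        // 9 == "-SNAPSHOT".length(), the constant used in the Gradle hunks above
        return version.endsWith("-SNAPSHOT")
                ? version.substring(0, version.length() - 9)
                : version;
    }

    public static void main(String[] args) {
        System.out.println(stripSnapshot("5.0.0-SNAPSHOT")); // 5.0.0
        System.out.println(stripSnapshot("5.0.0"));          // 5.0.0 (unchanged)
    }
}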
@@ -51,6 +51,11 @@ public class PluginBuildPlugin extends BuildPlugin {
                project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
                project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
            }

            project.namingConventions {
                // Plugins decalare extensions of ESIntegTestCase as "Tests" instead of IT.
                skipIntegTestInDisguise = true
            }
        }
        createIntegTestTask(project)
        createBundleTask(project)
@@ -63,11 +68,10 @@ public class PluginBuildPlugin extends BuildPlugin {
            testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
            // we "upgrade" these optional deps to provided for plugins, since they will run
            // with a full elasticsearch server that includes optional deps
            provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
            provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
            provided "com.vividsolutions:jts:${project.versions.jts}"
            provided "log4j:log4j:${project.versions.log4j}"
            provided "log4j:apache-log4j-extras:${project.versions.log4j}"
            provided "org.slf4j:slf4j-api:${project.versions.slf4j}"
            provided "net.java.dev.jna:jna:${project.versions.jna}"
        }
    }
@@ -101,11 +105,6 @@ public class PluginBuildPlugin extends BuildPlugin {
            from pluginMetadata // metadata (eg custom security policy)
            from project.jar // this plugin's jar
            from project.configurations.runtime - project.configurations.provided // the dep jars
            // hack just for slf4j, in case it is "upgrade" from provided to compile,
            // since it is not actually provided in distributions
            from project.configurations.runtime.fileCollection { Dependency dep ->
                return dep.name == 'slf4j-api' && project.configurations.compile.dependencies.contains(dep)
            }
            // extra files for the plugin to go into the zip
            from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
            from('src/main') {
@@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy {
    }

    Map generateSubstitutions() {
        def stringSnap = { version ->
            if (version.endsWith("-SNAPSHOT")) {
                return version.substring(0, version.length() - 9)
            }
            return version
        }
        return [
            'name': extension.name,
            'description': extension.description,
            'version': extension.version,
            'elasticsearchVersion': VersionProperties.elasticsearch,
            'version': stringSnap(extension.version),
            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
            'javaVersion': project.targetCompatibility as String,
            'isolated': extension.isolated as String,
            'classname': extension.classname
@@ -0,0 +1,89 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs NamingConventionsCheck on a classpath/directory combo to verify that
 * tests are named according to our conventions so they'll be picked up by
 * gradle. Read the Javadoc for NamingConventionsCheck to learn more.
 */
public class NamingConventionsTask extends LoggedExec {
    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    @OutputFile
    File successMarker = new File(project.buildDir, 'markers/namingConventions')

    /**
     * The classpath to run the naming conventions checks against. Must contain the files in the test
     * output directory and everything required to load those classes.
     *
     * We don't declare the actual test files as a dependency or input because if they change then
     * this will change.
     */
    @InputFiles
    FileCollection classpath = project.sourceSets.test.runtimeClasspath

    /**
     * Should we skip the integ tests in disguise tests? Defaults to true because only core names its
     * integ tests correctly.
     */
    @Input
    boolean skipIntegTestInDisguise = false

    public NamingConventionsTask() {
        dependsOn(classpath)
        description = "Runs NamingConventionsCheck on ${classpath}"
        executable = new File(project.javaHome, 'bin/java')
        onlyIf { project.sourceSets.test.output.classesDir.exists() }
        /*
         * We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
         * ready for us. Strangely neither one on their own are good enough.
         */
        project.afterEvaluate {
            doFirst {
                args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
                if (skipIntegTestInDisguise) {
                    args('--skip-integ-tests-in-disguise')
                }
                /*
                 * The test framework has classes that fail the checks to validate that the checks fail properly.
                 * Since these would cause the build to fail we have to ignore them with this parameter. The
                 * process of ignoring them lets us validate that they were found so this ignore parameter acts
                 * as the test for the NamingConventionsCheck.
                 */
                if (':test:framework'.equals(project.path)) {
                    args('--self-test')
                }
                args('--', project.sourceSets.test.output.classesDir.absolutePath)
            }
        }
        doLast { successMarker.setText("", 'UTF-8') }
    }
}
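The constructor above assembles an ordinary `java` invocation: `-cp <test runtime classpath> org.elasticsearch.test.NamingConventionsCheck`, optionally `--skip-integ-tests-in-disguise`, `--self-test` only for `:test:framework`, then `--` followed by the test classes directory. A rough equivalent outside Gradle, sketched with ProcessBuilder; the paths are placeholders, not values taken from this build:

import java.util.ArrayList;
import java.util.List;

public class NamingConventionsInvocation {
    public static void main(String[] args) throws Exception {
        String javaHome = System.getProperty("java.home"); // the task uses project.javaHome
        String classpath = "build/classes/test:lib/*";      // placeholder for classpath.asPath
        String testClassesDir = "build/classes/test";       // placeholder for the classes dir

        List<String> cmd = new ArrayList<>();
        cmd.add(javaHome + "/bin/java");
        cmd.add("-cp");
        cmd.add(classpath);
        cmd.add("org.elasticsearch.test.NamingConventionsCheck");
        cmd.add("--skip-integ-tests-in-disguise");           // only when the flag is set
        cmd.add("--");
        cmd.add(testClassesDir);

        // Run the check and propagate its exit code, like the Gradle exec task does.
        Process process = new ProcessBuilder(cmd).inheritIO().start();
        System.exit(process.waitFor());
    }
}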
@@ -33,6 +33,7 @@ class PrecommitTasks {
        List<Task> precommitTasks = [
            configureForbiddenApis(project),
            configureCheckstyle(project),
            configureNamingConventions(project),
            project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
            project.tasks.create('jarHell', JarHellTask.class),
@@ -109,4 +110,11 @@ class PrecommitTasks {
        }
        return checkstyleTask
    }

    private static Task configureNamingConventions(Project project) {
        if (project.sourceSets.findByName("test")) {
            return project.tasks.create('namingConventions', NamingConventionsTask)
        }
        return null
    }
}
@@ -245,7 +245,8 @@ class ClusterFormationTasks {
            return setup
        }
        Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
        copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
        File configDir = new File(node.homeDir, 'config')
        copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
        for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
            copyConfig.doFirst {
                // make sure the copy won't be a no-op or act on a directory
@@ -258,9 +259,12 @@
                }
            }
            File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
            copyConfig.into(destConfigFile.canonicalFile.parentFile)
                      .from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
                      .rename { destConfigFile.name }
            // wrap source file in closure to delay resolution to execution time
            copyConfig.from({ extraConfigFile.getValue() }) {
                // this must be in a closure so it is only applied to the single file specified in from above
                into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
                rename { destConfigFile.name }
            }
        }
        return copyConfig
    }
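The new copy spec computes each file's destination by relativizing the parent of destConfigFile against configDir, so an extra config key such as `scripts/my_script.groovy` ends up under `config/scripts/`. A small worked example of that path arithmetic, with directories invented for illustration:

import java.nio.file.Path;
import java.nio.file.Paths;

public class RelativizeDemo {
    public static void main(String[] args) {
        Path configDir = Paths.get("/tmp/node0/config");
        Path destConfigFile = Paths.get("/tmp/node0/config/scripts/my_script.groovy");

        // Same expression shape as the hunk: relativize the parent of the destination file.
        Path relative = configDir.relativize(destConfigFile.getParent());
        System.out.println(relative); // prints "scripts", the sub-directory handed to into(...)
    }
}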
@@ -207,16 +207,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchHelper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryAndFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryThenFetchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchTypeAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]SuggestResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]TransportSuggestAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />
@@ -374,9 +364,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]NamedWriteableRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]ESLoggerFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]Loggers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]log4j[/\\]LogConfigurator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]ElasticsearchDirectoryReader.java" checks="LineLength" />
@@ -759,7 +746,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]action[/\\]SearchServiceTransportAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />
@@ -1500,7 +1486,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DateRangeTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />
@@ -1538,7 +1523,6 @@
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IndexableBinaryStringTools.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisTestUtils.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]TestIndexableBinaryStringTools.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]JapaneseStopTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]KuromojiAnalysisTests.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />
@@ -1572,13 +1556,6 @@
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptSecurityTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]SimpleBench.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]Definition.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]PainlessPlugin.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ConditionalTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]FieldTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]FloatOverflowEnabledTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]IntegerOverflowEnabledTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]python[/\\]PythonPlugin.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
@@ -1626,7 +1603,6 @@
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]ESSmokeClientTestCase.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]AbstractMustacheTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]CombineProcessorsTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestDocumentMustacheIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestMustacheSetProcessorIT.java" checks="LineLength" />
@@ -33,20 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.Filter
org.apache.lucene.search.FilteredQuery
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
org.apache.lucene.search.QueryWrapperFilter
org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()
@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
@@ -1,10 +1,10 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-850c6c2
elasticsearch = 5.0.0
lucene = 6.0.0-snapshot-bea235f

# optional dependencies
spatial4j = 0.5
spatial4j = 0.6
jts = 1.13
jackson = 2.6.2
jackson = 2.7.1
log4j = 1.2.17
slf4j = 1.6.2
jna = 4.1.0
@@ -13,6 +13,8 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
commonslogging = 1.1.3
@@ -42,6 +42,7 @@ dependencies {
  compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
  compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
  compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

@@ -71,13 +72,12 @@ dependencies {
  compile 'org.hdrhistogram:HdrHistogram:2.1.6'

  // lucene spatial
  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "com.vividsolutions:jts:${versions.jts}", optional

  // logging
  compile "log4j:log4j:${versions.log4j}", optional
  compile "log4j:apache-log4j-extras:${versions.log4j}", optional
  compile "org.slf4j:slf4j-api:${versions.slf4j}", optional

  compile "net.java.dev.jna:jna:${versions.jna}", optional
@@ -169,11 +169,6 @@ thirdPartyAudit.excludes = [
  'org.apache.commons.logging.Log',
  'org.apache.commons.logging.LogFactory',

  // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
  'org.apache.regexp.CharacterIterator',
  'org.apache.regexp.RE',
  'org.apache.regexp.REProgram',

  // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
  'org.apache.tomcat.jni.Buffer',
  'org.apache.tomcat.jni.Library',
@@ -211,7 +206,7 @@ thirdPartyAudit.excludes = [
  'org.jboss.marshalling.MarshallingConfiguration',
  'org.jboss.marshalling.Unmarshaller',

  // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',

  // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
@@ -224,8 +219,9 @@ thirdPartyAudit.excludes = [
  'org.osgi.util.tracker.ServiceTracker',
  'org.osgi.util.tracker.ServiceTrackerCustomizer',

  'org.slf4j.impl.StaticMDCBinder',
  'org.slf4j.impl.StaticMarkerBinder',
  // from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
  'org.slf4j.Logger',
  'org.slf4j.LoggerFactory',
]

// dependency license are currently checked in distribution
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
            if (boosts != null) {
                boost = boosts[i];
            }
            builder.append(ToStringUtils.boost(boost));
            if (boost != 1f) {
                builder.append('^').append(boost);
            }
            builder.append(", ");
        }
        if (terms.length > 0) {
            builder.setLength(builder.length() - 2);
        }
        builder.append("])");
        builder.append(ToStringUtils.boost(getBoost()));
        return builder.toString();
    }
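With ToStringUtils.boost() removed from Lucene, the hunk renders boosts by hand: nothing is appended for the default boost of 1, otherwise `^<boost>` follows the term. A tiny illustration of the resulting fragments; the helper and inputs are made up:

public class BoostToStringDemo {
    static String render(String term, float boost) {
        StringBuilder builder = new StringBuilder(term);
        if (boost != 1f) {                 // same check as the hunk
            builder.append('^').append(boost);
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("title:foo", 1f));   // title:foo
        System.out.println(render("title:foo", 2.5f)); // title:foo^2.5
    }
}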
@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getFieldQuerySingle(field, queryText, quoted);
@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return super.getFieldQuery(field, queryText, slop);
@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
        }
        if (clauses.size() == 0) // happens for stopwords
            return null;
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
}

@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
                clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
            }
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getPrefixQuerySingle(field, termStr);
@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
        for (String token : tlist) {
            clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
}

@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getWildcardQuerySingle(field, termStr);
@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getRegexpQuerySingle(field, termStr);
@@ -739,10 +740,24 @@ public class MapperQueryParser extends QueryParser {
            setAnalyzer(oldAnalyzer);
        }
    }

    /**
     * @deprecated review all use of this, don't rely on coord
     */
    @Deprecated
    protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setDisableCoord(true);
        for (BooleanClause clause : clauses) {
            builder.add(clause);
        }
        return fixNegativeQueryIfNeeded(builder.build());
    }


    @Override
    protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
        Query q = super.getBooleanQuery(clauses, disableCoord);
    protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
        Query q = super.getBooleanQuery(clauses);
        if (q == null) {
            return null;
        }
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
        }
        pq = builder.build();
        //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
        assert q.getBoost() == 1f;
        assert q instanceof BoostQuery == false;
        return pq;
    } else if (q instanceof MultiPhraseQuery) {
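Every former `getBooleanQuery(clauses, true)` call site above now goes through the new getBooleanQueryCoordDisabled helper, which rebuilds the clauses with BooleanQuery.Builder and coord scoring switched off. A minimal sketch of that pattern, assuming the Lucene 6 snapshot API this commit targets (where Builder.setDisableCoord still exists); the field and terms are illustrative:

import java.util.Arrays;
import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class CoordDisabledDemo {
    static Query booleanQueryCoordDisabled(List<BooleanClause> clauses) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setDisableCoord(true); // the point of the new helper
        for (BooleanClause clause : clauses) {
            builder.add(clause);
        }
        return builder.build();
    }

    public static void main(String[] args) {
        List<BooleanClause> clauses = Arrays.asList(
                new BooleanClause(new TermQuery(new Term("body", "foo")), BooleanClause.Occur.SHOULD),
                new BooleanClause(new TermQuery(new Term("body", "bar")), BooleanClause.Occur.SHOULD));
        System.out.println(booleanQueryCoordDisabled(clauses));
    }
}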
@@ -26,8 +26,7 @@ import java.io.IOException;
/**
 * Abstract decorator class of a DocIdSetIterator
 * implementation that provides on-demand filter/validation
 * mechanism on an underlying DocIdSetIterator. See {@link
 * FilteredDocIdSet}.
 * mechanism on an underlying DocIdSetIterator.
 */
public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
    protected DocIdSetIterator _innerIter;
@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
        if (numTerms > 16) {
            for (Term[] currentPosTerm : terms) {
                for (Term term : currentPosTerm) {
                    super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
                    super.flatten(new TermQuery(term), reader, flatQueries, 1F);
                }
            }
            return;
@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
            queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
        }
        Query query = queryBuilder.build();
        this.flatten(query, reader, flatQueries, orig.getBoost());
        this.flatten(query, reader, flatQueries, 1F);
    } else {
        Term[] t = terms.get(currentPos);
        for (int i = 0; i < t.length; i++) {
@@ -45,6 +45,7 @@ public class Build {
    static {
        final String shortHash;
        final String date;
        final boolean isSnapshot;

        Path path = getElasticsearchCodebase();
        if (path.toString().endsWith(".jar")) {
@@ -52,6 +53,7 @@ public class Build {
            Manifest manifest = jar.getManifest();
            shortHash = manifest.getMainAttributes().getValue("Change");
            date = manifest.getMainAttributes().getValue("Build-Date");
            isSnapshot = "true".equals(manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Snapshot"));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
@@ -59,6 +61,7 @@ public class Build {
            // not running from a jar (unit tests, IDE)
            shortHash = "Unknown";
            date = "Unknown";
            isSnapshot = true;
        }
        if (shortHash == null) {
            throw new IllegalStateException("Error finding the build shortHash. " +
@@ -69,9 +72,11 @@ public class Build {
                "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug.");
        }

        CURRENT = new Build(shortHash, date);
        CURRENT = new Build(shortHash, date, isSnapshot);
    }

    private final boolean isSnapshot;

    /**
     * Returns path to elasticsearch codebase path
     */
@@ -88,9 +93,10 @@ public class Build {
    private String shortHash;
    private String date;

    Build(String shortHash, String date) {
    Build(String shortHash, String date, boolean isSnapshot) {
        this.shortHash = shortHash;
        this.date = date;
        this.isSnapshot = isSnapshot;
    }

    public String shortHash() {
@@ -104,12 +110,18 @@ public class Build {
    public static Build readBuild(StreamInput in) throws IOException {
        String hash = in.readString();
        String date = in.readString();
        return new Build(hash, date);
        boolean snapshot = in.readBoolean();
        return new Build(hash, date, snapshot);
    }

    public static void writeBuild(Build build, StreamOutput out) throws IOException {
        out.writeString(build.shortHash());
        out.writeString(build.date());
        out.writeBoolean(build.isSnapshot());
    }

    public boolean isSnapshot() {
        return isSnapshot;
    }

    @Override
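readBuild and writeBuild have to stay mirror images: the hash, the date, and now the snapshot flag are read back in exactly the order they were written. A rough illustration of that ordering constraint using plain JDK data streams rather than the project's StreamInput/StreamOutput; the values are placeholders (the hash is just the commit id from this page):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class BuildWireOrderDemo {
    public static void main(String[] args) throws Exception {
        // Write side, in the same order as writeBuild: shortHash, date, isSnapshot.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF("330f2919cb");
            out.writeUTF("unknown-build-date");
            out.writeBoolean(true);
        }

        // Read side must consume the fields in exactly the same order, as readBuild does.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            String hash = in.readUTF();
            String date = in.readUTF();
            boolean snapshot = in.readBoolean();
            System.out.println(hash + " " + date + " snapshot=" + snapshot);
        }
    }
}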
@@ -23,7 +23,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
@@ -35,262 +35,36 @@ import java.io.IOException;
@SuppressWarnings("deprecation")
public class Version {

    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
    // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator
    // AA values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // the (internal) format of the id is there so we can easily do after/before checks on the id

    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;

    public static final int V_0_18_0_ID = /*00*/180099;
    public static final Version V_0_18_0 = new Version(V_0_18_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_1_ID = /*00*/180199;
    public static final Version V_0_18_1 = new Version(V_0_18_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_2_ID = /*00*/180299;
    public static final Version V_0_18_2 = new Version(V_0_18_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_3_ID = /*00*/180399;
    public static final Version V_0_18_3 = new Version(V_0_18_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_4_ID = /*00*/180499;
    public static final Version V_0_18_4 = new Version(V_0_18_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_5_ID = /*00*/180599;
    public static final Version V_0_18_5 = new Version(V_0_18_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_6_ID = /*00*/180699;
    public static final Version V_0_18_6 = new Version(V_0_18_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_7_ID = /*00*/180799;
    public static final Version V_0_18_7 = new Version(V_0_18_7_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_8_ID = /*00*/180899;
    public static final Version V_0_18_8 = new Version(V_0_18_8_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC1_ID = /*00*/190051;
    public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC2_ID = /*00*/190052;
    public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC3_ID = /*00*/190053;
    public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_ID = /*00*/190099;
    public static final Version V_0_19_0 = new Version(V_0_19_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_1_ID = /*00*/190199;
    public static final Version V_0_19_1 = new Version(V_0_19_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_2_ID = /*00*/190299;
    public static final Version V_0_19_2 = new Version(V_0_19_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_3_ID = /*00*/190399;
    public static final Version V_0_19_3 = new Version(V_0_19_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_4_ID = /*00*/190499;
    public static final Version V_0_19_4 = new Version(V_0_19_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_5_ID = /*00*/190599;
    public static final Version V_0_19_5 = new Version(V_0_19_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_6_ID = /*00*/190699;
    public static final Version V_0_19_6 = new Version(V_0_19_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_7_ID = /*00*/190799;
    public static final Version V_0_19_7 = new Version(V_0_19_7_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_8_ID = /*00*/190899;
    public static final Version V_0_19_8 = new Version(V_0_19_8_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_9_ID = /*00*/190999;
    public static final Version V_0_19_9 = new Version(V_0_19_9_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_10_ID = /*00*/191099;
    public static final Version V_0_19_10 = new Version(V_0_19_10_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_11_ID = /*00*/191199;
    public static final Version V_0_19_11 = new Version(V_0_19_11_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_12_ID = /*00*/191299;
    public static final Version V_0_19_12 = new Version(V_0_19_12_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_13_ID = /*00*/191399;
    public static final Version V_0_19_13 = new Version(V_0_19_13_ID, false, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_20_0_RC1_ID = /*00*/200051;
    public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_0_ID = /*00*/200099;
    public static final Version V_0_20_0 = new Version(V_0_20_0_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_1_ID = /*00*/200199;
    public static final Version V_0_20_1 = new Version(V_0_20_1_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_2_ID = /*00*/200299;
    public static final Version V_0_20_2 = new Version(V_0_20_2_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_3_ID = /*00*/200399;
    public static final Version V_0_20_3 = new Version(V_0_20_3_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_4_ID = /*00*/200499;
    public static final Version V_0_20_4 = new Version(V_0_20_4_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_5_ID = /*00*/200599;
    public static final Version V_0_20_5 = new Version(V_0_20_5_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_6_ID = /*00*/200699;
    public static final Version V_0_20_6 = new Version(V_0_20_6_ID, false, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_7_ID = /*00*/200799;
    public static final Version V_0_20_7 = new Version(V_0_20_7_ID, true, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_90_0_Beta1_ID = /*00*/900001;
    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC1_ID = /*00*/900051;
    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC2_ID = /*00*/900052;
    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_0_ID = /*00*/900099;
    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_1_ID = /*00*/900199;
    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_2_ID = /*00*/900299;
    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_3_ID = /*00*/900399;
    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_4_ID = /*00*/900499;
    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_5_ID = /*00*/900599;
    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_6_ID = /*00*/900699;
    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_7_ID = /*00*/900799;
    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_8_ID = /*00*/900899;
    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_9_ID = /*00*/900999;
    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_10_ID = /*00*/901099;
    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_11_ID = /*00*/901199;
    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_12_ID = /*00*/901299;
    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_13_ID = /*00*/901399;
    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_14_ID = /*00*/901499;
    public static final Version V_0_90_14 = new Version(V_0_90_14_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);

    public static final int V_1_0_0_Beta1_ID = 1000001;
    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_1_0_0_Beta2_ID = 1000002;
    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC1_ID = 1000051;
    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC2_ID = 1000052;
    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_ID = 1000099;
    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_1_ID = 1000199;
    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_2_ID = 1000299;
    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_3_ID = 1000399;
    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_4_ID = 1000499;
    public static final Version V_1_0_4 = new Version(V_1_0_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_1_0_ID = 1010099;
    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_1_ID = 1010199;
    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_2_ID = 1010299;
    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_2_0_ID = 1020099;
    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_1_ID = 1020199;
    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_2_ID = 1020299;
    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_3_ID = 1020399;
    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_4_ID = 1020499;
    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_5_ID = 1020599;
    public static final Version V_1_2_5 = new Version(V_1_2_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_3_0_ID = 1030099;
    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_1_ID = 1030199;
    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_2_ID = 1030299;
    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_3_ID = 1030399;
    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_4_ID = 1030499;
    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_5_ID = 1030599;
    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_6_ID = 1030699;
    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_7_ID = 1030799;
    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_8_ID = 1030899;
    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_9_ID = 1030999;
    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_10_ID = /*00*/1031099;
    public static final Version V_1_3_10 = new Version(V_1_3_10_ID, true, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_4_0_Beta1_ID = 1040001;
    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_1);
    public static final int V_1_4_0_ID = 1040099;
    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_1_ID = 1040199;
    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_2_ID = 1040299;
    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_3_ID = 1040399;
    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_4_ID = 1040499;
    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_5_ID = 1040599;
    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_4_6_ID = 1040699;
    public static final Version V_1_4_6 = new Version(V_1_4_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_0_ID = 1050099;
    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_1_ID = 1050199;
    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_2_ID = 1050299;
    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_3_ID = 1050399;
    public static final Version V_1_5_3 = new Version(V_1_5_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_0_ID = 1060099;
    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_1_ID = 1060199;
    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_2_ID = 1060299;
    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_3_ID = 1060399;
    public static final Version V_1_6_3 = new Version(V_1_6_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_0_ID = 1070099;
|
||||
public static final Version V_1_7_0 = new Version(V_1_7_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_1_ID = 1070199;
|
||||
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_2_ID = 1070299;
|
||||
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_3_ID = 1070399;
|
||||
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_4_ID = 1070499;
|
||||
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_5_ID = 1070599;
|
||||
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_6_ID = 1070699;
|
||||
public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_beta1_ID = 2000001;
|
||||
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_0_beta2_ID = 2000002;
|
||||
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_0_rc1_ID = 2000051;
|
||||
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_0_ID = 2000099;
|
||||
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_1_ID = 2000199;
|
||||
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_2_ID = 2000299;
|
||||
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_3_ID = 2000399;
|
||||
public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_1_0_ID = 2010099;
|
||||
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_1_ID = 2010199;
|
||||
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_2_ID = 2010299;
|
||||
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_3_ID = 2010399;
|
||||
public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_2_0_ID = 2020099;
|
||||
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final int V_2_2_1_ID = 2020199;
|
||||
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final int V_2_3_0_ID = 2030099;
|
||||
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_3_0_0_ID = 3000099;
|
||||
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final Version CURRENT = V_3_0_0;
|
||||
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_5_0_0_ID = 5000099;
|
||||
public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
|
||||
public static final Version CURRENT = V_5_0_0;
static {
|
||||
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
|
||||
@ -303,24 +77,18 @@ public class Version {
public static Version fromId(int id) {
|
||||
switch (id) {
|
||||
case V_3_0_0_ID:
|
||||
return V_3_0_0;
|
||||
case V_5_0_0_ID:
|
||||
return V_5_0_0;
|
||||
case V_2_3_0_ID:
|
||||
return V_2_3_0;
|
||||
case V_2_2_1_ID:
|
||||
return V_2_2_1;
|
||||
case V_2_2_0_ID:
|
||||
return V_2_2_0;
|
||||
case V_2_1_3_ID:
|
||||
return V_2_1_3;
|
||||
case V_2_1_2_ID:
|
||||
return V_2_1_2;
|
||||
case V_2_1_1_ID:
|
||||
return V_2_1_1;
|
||||
case V_2_1_0_ID:
|
||||
return V_2_1_0;
|
||||
case V_2_0_3_ID:
|
||||
return V_2_0_3;
|
||||
case V_2_0_2_ID:
|
||||
return V_2_0_2;
|
||||
case V_2_0_1_ID:
|
||||
@ -333,218 +101,8 @@ public class Version {
|
||||
return V_2_0_0_beta2;
|
||||
case V_2_0_0_beta1_ID:
|
||||
return V_2_0_0_beta1;
|
||||
case V_1_7_6_ID:
|
||||
return V_1_7_6;
|
||||
case V_1_7_5_ID:
|
||||
return V_1_7_5;
|
||||
case V_1_7_4_ID:
|
||||
return V_1_7_4;
|
||||
case V_1_7_3_ID:
|
||||
return V_1_7_3;
|
||||
case V_1_7_2_ID:
|
||||
return V_1_7_2;
|
||||
case V_1_7_1_ID:
|
||||
return V_1_7_1;
|
||||
case V_1_7_0_ID:
|
||||
return V_1_7_0;
|
||||
case V_1_6_3_ID:
|
||||
return V_1_6_3;
|
||||
case V_1_6_2_ID:
|
||||
return V_1_6_2;
|
||||
case V_1_6_1_ID:
|
||||
return V_1_6_1;
|
||||
case V_1_6_0_ID:
|
||||
return V_1_6_0;
|
||||
case V_1_5_3_ID:
|
||||
return V_1_5_3;
|
||||
case V_1_5_2_ID:
|
||||
return V_1_5_2;
|
||||
case V_1_5_1_ID:
|
||||
return V_1_5_1;
|
||||
case V_1_5_0_ID:
|
||||
return V_1_5_0;
|
||||
case V_1_4_6_ID:
|
||||
return V_1_4_6;
|
||||
case V_1_4_5_ID:
|
||||
return V_1_4_5;
|
||||
case V_1_4_4_ID:
|
||||
return V_1_4_4;
|
||||
case V_1_4_3_ID:
|
||||
return V_1_4_3;
|
||||
case V_1_4_2_ID:
|
||||
return V_1_4_2;
|
||||
case V_1_4_1_ID:
|
||||
return V_1_4_1;
|
||||
case V_1_4_0_ID:
|
||||
return V_1_4_0;
|
||||
case V_1_4_0_Beta1_ID:
|
||||
return V_1_4_0_Beta1;
|
||||
case V_1_3_10_ID:
|
||||
return V_1_3_10;
|
||||
case V_1_3_9_ID:
|
||||
return V_1_3_9;
|
||||
case V_1_3_8_ID:
|
||||
return V_1_3_8;
|
||||
case V_1_3_7_ID:
|
||||
return V_1_3_7;
|
||||
case V_1_3_6_ID:
|
||||
return V_1_3_6;
|
||||
case V_1_3_5_ID:
|
||||
return V_1_3_5;
|
||||
case V_1_3_4_ID:
|
||||
return V_1_3_4;
|
||||
case V_1_3_3_ID:
|
||||
return V_1_3_3;
|
||||
case V_1_3_2_ID:
|
||||
return V_1_3_2;
|
||||
case V_1_3_1_ID:
|
||||
return V_1_3_1;
|
||||
case V_1_3_0_ID:
|
||||
return V_1_3_0;
|
||||
case V_1_2_5_ID:
|
||||
return V_1_2_5;
|
||||
case V_1_2_4_ID:
|
||||
return V_1_2_4;
|
||||
case V_1_2_3_ID:
|
||||
return V_1_2_3;
|
||||
case V_1_2_2_ID:
|
||||
return V_1_2_2;
|
||||
case V_1_2_1_ID:
|
||||
return V_1_2_1;
|
||||
case V_1_2_0_ID:
|
||||
return V_1_2_0;
|
||||
case V_1_1_2_ID:
|
||||
return V_1_1_2;
|
||||
case V_1_1_1_ID:
|
||||
return V_1_1_1;
|
||||
case V_1_1_0_ID:
|
||||
return V_1_1_0;
|
||||
case V_1_0_4_ID:
|
||||
return V_1_0_4;
|
||||
case V_1_0_3_ID:
|
||||
return V_1_0_3;
|
||||
case V_1_0_2_ID:
|
||||
return V_1_0_2;
|
||||
case V_1_0_1_ID:
|
||||
return V_1_0_1;
|
||||
case V_1_0_0_ID:
|
||||
return V_1_0_0;
|
||||
case V_1_0_0_RC2_ID:
|
||||
return V_1_0_0_RC2;
|
||||
case V_1_0_0_RC1_ID:
|
||||
return V_1_0_0_RC1;
|
||||
case V_1_0_0_Beta2_ID:
|
||||
return V_1_0_0_Beta2;
|
||||
case V_1_0_0_Beta1_ID:
|
||||
return V_1_0_0_Beta1;
|
||||
case V_0_90_14_ID:
|
||||
return V_0_90_14;
|
||||
case V_0_90_13_ID:
|
||||
return V_0_90_13;
|
||||
case V_0_90_12_ID:
|
||||
return V_0_90_12;
|
||||
case V_0_90_11_ID:
|
||||
return V_0_90_11;
|
||||
case V_0_90_10_ID:
|
||||
return V_0_90_10;
|
||||
case V_0_90_9_ID:
|
||||
return V_0_90_9;
|
||||
case V_0_90_8_ID:
|
||||
return V_0_90_8;
|
||||
case V_0_90_7_ID:
|
||||
return V_0_90_7;
|
||||
case V_0_90_6_ID:
|
||||
return V_0_90_6;
|
||||
case V_0_90_5_ID:
|
||||
return V_0_90_5;
|
||||
case V_0_90_4_ID:
|
||||
return V_0_90_4;
|
||||
case V_0_90_3_ID:
|
||||
return V_0_90_3;
|
||||
case V_0_90_2_ID:
|
||||
return V_0_90_2;
|
||||
case V_0_90_1_ID:
|
||||
return V_0_90_1;
|
||||
case V_0_90_0_ID:
|
||||
return V_0_90_0;
|
||||
case V_0_90_0_RC2_ID:
|
||||
return V_0_90_0_RC2;
|
||||
case V_0_90_0_RC1_ID:
|
||||
return V_0_90_0_RC1;
|
||||
case V_0_90_0_Beta1_ID:
|
||||
return V_0_90_0_Beta1;
|
||||
case V_0_20_7_ID:
|
||||
return V_0_20_7;
|
||||
case V_0_20_6_ID:
|
||||
return V_0_20_6;
|
||||
case V_0_20_5_ID:
|
||||
return V_0_20_5;
|
||||
case V_0_20_4_ID:
|
||||
return V_0_20_4;
|
||||
case V_0_20_3_ID:
|
||||
return V_0_20_3;
|
||||
case V_0_20_2_ID:
|
||||
return V_0_20_2;
|
||||
case V_0_20_1_ID:
|
||||
return V_0_20_1;
|
||||
case V_0_20_0_ID:
|
||||
return V_0_20_0;
|
||||
case V_0_20_0_RC1_ID:
|
||||
return V_0_20_0_RC1;
|
||||
case V_0_19_0_RC1_ID:
|
||||
return V_0_19_0_RC1;
|
||||
case V_0_19_0_RC2_ID:
|
||||
return V_0_19_0_RC2;
|
||||
case V_0_19_0_RC3_ID:
|
||||
return V_0_19_0_RC3;
|
||||
case V_0_19_0_ID:
|
||||
return V_0_19_0;
|
||||
case V_0_19_1_ID:
|
||||
return V_0_19_1;
|
||||
case V_0_19_2_ID:
|
||||
return V_0_19_2;
|
||||
case V_0_19_3_ID:
|
||||
return V_0_19_3;
|
||||
case V_0_19_4_ID:
|
||||
return V_0_19_4;
|
||||
case V_0_19_5_ID:
|
||||
return V_0_19_5;
|
||||
case V_0_19_6_ID:
|
||||
return V_0_19_6;
|
||||
case V_0_19_7_ID:
|
||||
return V_0_19_7;
|
||||
case V_0_19_8_ID:
|
||||
return V_0_19_8;
|
||||
case V_0_19_9_ID:
|
||||
return V_0_19_9;
|
||||
case V_0_19_10_ID:
|
||||
return V_0_19_10;
|
||||
case V_0_19_11_ID:
|
||||
return V_0_19_11;
|
||||
case V_0_19_12_ID:
|
||||
return V_0_19_12;
|
||||
case V_0_19_13_ID:
|
||||
return V_0_19_13;
|
||||
case V_0_18_0_ID:
|
||||
return V_0_18_0;
|
||||
case V_0_18_1_ID:
|
||||
return V_0_18_1;
|
||||
case V_0_18_2_ID:
|
||||
return V_0_18_2;
|
||||
case V_0_18_3_ID:
|
||||
return V_0_18_3;
|
||||
case V_0_18_4_ID:
|
||||
return V_0_18_4;
|
||||
case V_0_18_5_ID:
|
||||
return V_0_18_5;
|
||||
case V_0_18_6_ID:
|
||||
return V_0_18_6;
|
||||
case V_0_18_7_ID:
|
||||
return V_0_18_7;
|
||||
case V_0_18_8_ID:
|
||||
return V_0_18_8;
|
||||
default:
|
||||
return new Version(id, false, org.apache.lucene.util.Version.LATEST);
|
||||
return new Version(id, org.apache.lucene.util.Version.LATEST);
|
||||
}
|
||||
}
@ -579,7 +137,7 @@ public class Version {
if (!Strings.hasLength(version)) {
return Version.CURRENT;
}
final boolean snapshot;
final boolean snapshot; // this is some BWC for 2.x and before indices
if (snapshot = version.endsWith("-SNAPSHOT")) {
version = version.substring(0, version.length() - 9);
}
@ -589,9 +147,13 @@ public class Version {
}

try {

final int rawMajor = Integer.parseInt(parts[0]);
if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
}
final int betaOffset = rawMajor < 5 ? 0 : 25;
//we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
final int major = Integer.parseInt(parts[0]) * 1000000;
final int major = rawMajor * 1000000;
final int minor = Integer.parseInt(parts[1]) * 10000;
final int revision = Integer.parseInt(parts[2]) * 100;

@ -599,19 +161,21 @@ public class Version {
int build = 99;
if (parts.length == 4) {
String buildStr = parts[3];
if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
build = Integer.parseInt(buildStr.substring(4));
}
if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
if (buildStr.startsWith("alpha")) {
assert rawMajor >= 5 : "major must be >= 5 but was " + major;
build = Integer.parseInt(buildStr.substring(5));
assert build < 25 : "expected a beta build but " + build + " >= 25";
} else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
build = betaOffset + Integer.parseInt(buildStr.substring(4));
assert build < 50 : "expected a beta build but " + build + " >= 50";
} else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
build = Integer.parseInt(buildStr.substring(2)) + 50;
} else {
throw new IllegalArgumentException("unable to parse version " + version);
}
}

final Version versionFromId = fromId(major + minor + revision + build);
if (snapshot != versionFromId.snapshot()) {
return new Version(versionFromId.id, snapshot, versionFromId.luceneVersion);
}
return versionFromId;
return fromId(major + minor + revision + build);

} catch (NumberFormatException e) {
throw new IllegalArgumentException("unable to parse version " + version, e);
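
For orientation, the parsing above composes the numeric version id as major * 1000000 + minor * 10000 + revision * 100 + build, folding alpha/beta/rc qualifiers into the build component for 5.x and later. A minimal sketch of that arithmetic, using only the constants visible in the hunk above (illustrative only, not part of the change set):

    // Minimal sketch of the id arithmetic used by fromString(); mirrors the hunk above.
    static int toId(int major, int minor, int revision, int build) {
        return major * 1000000 + minor * 10000 + revision * 100 + build;
    }
    // "2.3.0"        -> build 99 (GA release)      -> toId(2, 3, 0, 99) == 2030099
    // "5.0.0-alpha2" -> build 2  (alpha, < 25)     -> toId(5, 0, 0, 2)  == 5000002
    // "5.0.0-beta1"  -> build 25 + 1 == 26         -> toId(5, 0, 0, 26) == 5000026
    // "5.0.0-rc1"    -> build 1 + 50 == 51         -> toId(5, 0, 0, 51) == 5000051
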
@ -623,23 +187,17 @@ public class Version {
public final byte minor;
public final byte revision;
public final byte build;
public final Boolean snapshot;
public final org.apache.lucene.util.Version luceneVersion;

Version(int id, boolean snapshot, org.apache.lucene.util.Version luceneVersion) {
Version(int id, org.apache.lucene.util.Version luceneVersion) {
this.id = id;
this.major = (byte) ((id / 1000000) % 100);
this.minor = (byte) ((id / 10000) % 100);
this.revision = (byte) ((id / 100) % 100);
this.build = (byte) (id % 100);
this.snapshot = snapshot;
this.luceneVersion = luceneVersion;
}

public boolean snapshot() {
return snapshot;
}

public boolean after(Version version) {
return version.id < id;
}
@ -667,30 +225,6 @@ public class Version {
|
||||
return Version.smallest(this, fromId(major * 1000000 + 99));
|
||||
}
/**
|
||||
* Just the version number (without -SNAPSHOT if snapshot).
|
||||
*/
|
||||
public String number() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(major).append('.').append(minor).append('.').append(revision);
|
||||
if (isBeta()) {
|
||||
if (major >= 2) {
|
||||
sb.append("-beta");
|
||||
} else {
|
||||
sb.append(".Beta");
|
||||
}
|
||||
sb.append(build);
|
||||
} else if (build < 99) {
|
||||
if (major >= 2) {
|
||||
sb.append("-rc");
|
||||
} else {
|
||||
sb.append(".RC");
|
||||
}
|
||||
sb.append(build - 50);
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
|
||||
public static void main(String[] args) {
|
||||
System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: " + JvmInfo.jvmInfo().version());
|
||||
@ -699,9 +233,24 @@ public class Version {
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(number());
|
||||
if (snapshot()) {
|
||||
sb.append("-SNAPSHOT");
|
||||
sb.append(major).append('.').append(minor).append('.').append(revision);
|
||||
if (isAlpha()) {
|
||||
sb.append("-alpha");
|
||||
sb.append(build);
|
||||
} else if (isBeta()) {
|
||||
if (major >= 2) {
|
||||
sb.append("-beta");
|
||||
} else {
|
||||
sb.append(".Beta");
|
||||
}
|
||||
sb.append(major < 5 ? build : build-25);
|
||||
} else if (build < 99) {
|
||||
if (major >= 2) {
|
||||
sb.append("-rc");
|
||||
} else {
|
||||
sb.append(".RC");
|
||||
}
|
||||
sb.append(build - 50);
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
@ -730,7 +279,16 @@ public class Version {
}

public boolean isBeta() {
return build < 50;
return major < 5 ? build < 50 : build >= 25 && build < 50;
}

/**
* Returns true iff this version is an alpha version
* Note: This has been introduced in elasticsearch version 5. Previous versions will never
* have an alpha version.
*/
public boolean isAlpha() {
return major < 5 ? false : build < 25;
}

public boolean isRC() {
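
Taken together with the rc offset in fromString(), the build component partitions into qualifier ranges. The summary below is inferred from isAlpha(), isBeta() and the build - 50 / build - 25 printing in the hunks above, and is illustrative only:

    // Build-component ranges for major >= 5, as implied by isAlpha()/isBeta() and the rc handling above:
    //   0..24   alpha  (isAlpha() == true)
    //   25..49  beta   (isBeta() == true, printed as build - 25)
    //   50..98  rc     (printed as build - 50)
    //   99      GA release
    // For major < 5 there is no alpha range: 0..49 is beta, 50..98 is rc, 99 is GA.
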
@ -21,18 +21,16 @@ package org.elasticsearch.action;

/**
* A listener for action responses or failures.
*
*
*/
public interface ActionListener<Response> {

/**
* A response handler.
* Handle action response. This response may constitute a failure or a
* success but it is up to the listener to make that decision.
*/
void onResponse(Response response);

/**
* A failure handler.
* A failure caused by an exception at some phase of the task.
*/
void onFailure(Throwable e);
}
@ -174,12 +174,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction;
|
||||
import org.elasticsearch.action.search.TransportMultiSearchAction;
|
||||
import org.elasticsearch.action.search.TransportSearchAction;
|
||||
import org.elasticsearch.action.search.TransportSearchScrollAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
|
||||
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
|
||||
import org.elasticsearch.action.suggest.SuggestAction;
|
||||
import org.elasticsearch.action.suggest.TransportSuggestAction;
|
||||
import org.elasticsearch.action.support.ActionFilter;
|
||||
@ -333,16 +327,8 @@ public class ActionModule extends AbstractModule {
|
||||
TransportShardMultiGetAction.class);
|
||||
registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
|
||||
TransportShardBulkAction.class);
|
||||
registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
|
||||
TransportSearchDfsQueryThenFetchAction.class,
|
||||
TransportSearchQueryThenFetchAction.class,
|
||||
TransportSearchDfsQueryAndFetchAction.class,
|
||||
TransportSearchQueryAndFetchAction.class
|
||||
);
|
||||
registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
|
||||
TransportSearchScrollQueryThenFetchAction.class,
|
||||
TransportSearchScrollQueryAndFetchAction.class
|
||||
);
|
||||
registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
|
||||
registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
|
||||
registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
|
||||
registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
|
||||
registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
|
||||
|
@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
numberOfPendingTasks = in.readInt();
timedOut = in.readBoolean();
numberOfInFlightFetch = in.readInt();
if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
delayedUnassignedShards= in.readInt();
}
delayedUnassignedShards= in.readInt();
taskMaxWaitingTime = TimeValue.readTimeValue(in);
}

@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
out.writeInt(numberOfPendingTasks);
out.writeBoolean(timedOut);
out.writeInt(numberOfInFlightFetch);
if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
out.writeInt(delayedUnassignedShards);
}
out.writeInt(delayedUnassignedShards);
taskMaxWaitingTime.writeTo(out);
}
@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.http.HttpInfo;
|
||||
import org.elasticsearch.ingest.core.IngestInfo;
|
||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
import org.elasticsearch.monitor.os.OsInfo;
|
||||
import org.elasticsearch.monitor.process.ProcessInfo;
|
||||
@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
@Nullable
|
||||
private PluginsAndModules plugins;
|
||||
|
||||
NodeInfo() {
|
||||
@Nullable
|
||||
private IngestInfo ingest;
|
||||
|
||||
public NodeInfo() {
|
||||
}
|
||||
|
||||
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
|
||||
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
|
||||
super(node);
|
||||
this.version = version;
|
||||
this.build = build;
|
||||
@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
this.transport = transport;
|
||||
this.http = http;
|
||||
this.plugins = plugins;
|
||||
this.ingest = ingest;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
return this.plugins;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public IngestInfo getIngest() {
|
||||
return ingest;
|
||||
}
|
||||
|
||||
public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
|
||||
NodeInfo nodeInfo = new NodeInfo();
|
||||
nodeInfo.readFrom(in);
|
||||
@ -220,6 +230,10 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
plugins = new PluginsAndModules();
|
||||
plugins.readFrom(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
ingest = new IngestInfo();
|
||||
ingest.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -285,5 +299,11 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
out.writeBoolean(true);
|
||||
plugins.writeTo(out);
|
||||
}
|
||||
if (ingest == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
ingest.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
private boolean transport = true;
|
||||
private boolean http = true;
|
||||
private boolean plugins = true;
|
||||
private boolean ingest = true;
|
||||
|
||||
public NodesInfoRequest() {
|
||||
}
|
||||
@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
transport = false;
|
||||
http = false;
|
||||
plugins = false;
|
||||
ingest = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
transport = true;
|
||||
http = true;
|
||||
plugins = true;
|
||||
ingest = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
return plugins;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should information about ingest be returned
|
||||
* @param ingest true if you want info
|
||||
*/
|
||||
public NodesInfoRequest ingest(boolean ingest) {
|
||||
this.ingest = ingest;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if information about ingest is requested
|
||||
*/
|
||||
public boolean ingest() {
|
||||
return ingest;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
transport = in.readBoolean();
|
||||
http = in.readBoolean();
|
||||
plugins = in.readBoolean();
|
||||
ingest = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||
out.writeBoolean(transport);
|
||||
out.writeBoolean(http);
|
||||
out.writeBoolean(plugins);
|
||||
out.writeBoolean(ingest);
|
||||
}
|
||||
}
|
||||
|
@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
|
||||
request().plugins(plugins);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the node ingest info be returned.
|
||||
*/
|
||||
public NodesInfoRequestBuilder setIngest(boolean ingest) {
|
||||
request().ingest(ingest);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
@ -121,6 +121,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
|
||||
if (nodeInfo.getPlugins() != null) {
|
||||
nodeInfo.getPlugins().toXContent(builder, params);
|
||||
}
|
||||
if (nodeInfo.getIngest() != null) {
|
||||
nodeInfo.getIngest().toXContent(builder, params);
|
||||
}
|
||||
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
||||
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
|
||||
NodesInfoRequest request = nodeRequest.request;
|
||||
return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
|
||||
request.transport(), request.http(), request.plugins());
|
||||
request.transport(), request.http(), request.plugins(), request.ingest());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -95,7 +95,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
||||
public NodeInfoRequest() {
|
||||
}
|
||||
|
||||
NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
||||
public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
|
||||
super(nodeId);
|
||||
this.request = request;
|
||||
}
|
||||
|
@ -36,14 +36,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {

private String reason = DEFAULT_REASON;

/**
* Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
* all nodes will be cancelled.
*/
public CancelTasksRequest(String... nodesIds) {
super(nodesIds);
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -54,7 +46,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(reason);

}

@Override
@ -62,12 +53,18 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
return super.match(task) && task instanceof CancellableTask;
}

public CancelTasksRequest reason(String reason) {
/**
* Set the reason for canceling the task.
*/
public CancelTasksRequest setReason(String reason) {
this.reason = reason;
return this;
}

public String reason() {
/**
* The reason for canceling the task.
*/
public String getReason() {
return reason;
}
}
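
For context, a hedged usage sketch of the renamed reason accessors; the no-arg construction relies on the implicit default constructor once the varargs constructor is removed, which is an assumption not shown in this hunk:

    // Illustrative sketch only; not part of the change set.
    CancelTasksRequest request = new CancelTasksRequest()
            .setReason("user requested cancellation");   // was reason(String)
    String why = request.getReason();                     // was reason()
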
@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.action.TaskOperationFailure;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
|
||||
import org.elasticsearch.action.support.tasks.TransportTasksAction;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
@ -36,6 +35,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.tasks.CancellableTask;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.EmptyTransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
}
|
||||
|
||||
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
|
||||
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
|
||||
if (request.getTaskId().isSet() == false) {
|
||||
// we are only checking one task, we can optimize it
|
||||
CancellableTask task = taskManager.getCancellableTask(request.taskId());
|
||||
CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
|
||||
if (task != null) {
|
||||
if (request.match(task)) {
|
||||
operation.accept(task);
|
||||
} else {
|
||||
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
|
||||
throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
|
||||
}
|
||||
} else {
|
||||
if (taskManager.getTask(request.taskId()) != null) {
|
||||
if (taskManager.getTask(request.getTaskId().getId()) != null) {
|
||||
// The task exists, but doesn't support cancellation
|
||||
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
|
||||
throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
|
||||
} else {
|
||||
throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
|
||||
throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
@Override
|
||||
protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
|
||||
final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
|
||||
Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
|
||||
Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
|
||||
if (childNodes != null) {
|
||||
if (childNodes.isEmpty()) {
|
||||
logger.trace("cancelling task {} with no children", cancellableTask.getId());
|
||||
return cancellableTask.taskInfo(clusterService.localNode(), false);
|
||||
} else {
|
||||
logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
|
||||
setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
|
||||
setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
|
||||
return cancellableTask.taskInfo(clusterService.localNode(), false);
|
||||
}
|
||||
} else {
|
||||
@ -135,11 +135,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
}
|
||||
|
||||
private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
|
||||
sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
|
||||
sendSetBanRequest(nodes,
|
||||
BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()), reason),
|
||||
banLock);
|
||||
}
|
||||
|
||||
private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
|
||||
sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
|
||||
sendRemoveBanRequest(nodes,
|
||||
BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId())));
|
||||
}
|
||||
|
||||
private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
|
||||
@ -148,8 +151,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
|
||||
if (discoveryNode != null) {
|
||||
// Check if node still in the cluster
|
||||
logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]", request.parentNodeId, request
|
||||
.parentTaskId, node, request.ban);
|
||||
logger.debug("Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]", request.parentTaskId, node,
|
||||
request.ban);
|
||||
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
|
||||
new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
@ -164,8 +167,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
});
|
||||
} else {
|
||||
banLock.onBanSet();
|
||||
logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node no longer in the cluster",
|
||||
request.parentNodeId, request.parentTaskId, node);
|
||||
logger.debug("Cannot send ban for tasks with the parent [{}] to the node [{}] - the node no longer in the cluster",
|
||||
request.parentTaskId, node);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -176,13 +179,12 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
|
||||
if (discoveryNode != null) {
|
||||
// Check if node still in the cluster
|
||||
logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
|
||||
request.parentTaskId, node);
|
||||
logger.debug("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node);
|
||||
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler
|
||||
.INSTANCE_SAME);
|
||||
} else {
|
||||
logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node no longer in " +
|
||||
"the cluster", request.parentNodeId, request.parentTaskId, node);
|
||||
logger.debug("Cannot send remove ban request for tasks with the parent [{}] to the node [{}] - the node no longer in " +
|
||||
"the cluster", request.parentTaskId, node);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -218,23 +220,27 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
|
||||
private static class BanParentTaskRequest extends TransportRequest {
|
||||
|
||||
private String parentNodeId;
|
||||
|
||||
private long parentTaskId;
|
||||
private TaskId parentTaskId;
|
||||
|
||||
private boolean ban;
|
||||
|
||||
private String reason;
|
||||
|
||||
BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
|
||||
this.parentNodeId = parentNodeId;
|
||||
static BanParentTaskRequest createSetBanParentTaskRequest(TaskId parentTaskId, String reason) {
|
||||
return new BanParentTaskRequest(parentTaskId, reason);
|
||||
}
|
||||
|
||||
static BanParentTaskRequest createRemoveBanParentTaskRequest(TaskId parentTaskId) {
|
||||
return new BanParentTaskRequest(parentTaskId);
|
||||
}
|
||||
|
||||
private BanParentTaskRequest(TaskId parentTaskId, String reason) {
|
||||
this.parentTaskId = parentTaskId;
|
||||
this.ban = true;
|
||||
this.reason = reason;
|
||||
}
|
||||
|
||||
BanParentTaskRequest(String parentNodeId, long parentTaskId) {
|
||||
this.parentNodeId = parentNodeId;
|
||||
private BanParentTaskRequest(TaskId parentTaskId) {
|
||||
this.parentTaskId = parentTaskId;
|
||||
this.ban = false;
|
||||
}
|
||||
@ -245,8 +251,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
parentNodeId = in.readString();
|
||||
parentTaskId = in.readLong();
|
||||
parentTaskId = new TaskId(in);
|
||||
ban = in.readBoolean();
|
||||
if (ban) {
|
||||
reason = in.readString();
|
||||
@ -256,8 +261,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(parentNodeId);
|
||||
out.writeLong(parentTaskId);
|
||||
parentTaskId.writeTo(out);
|
||||
out.writeBoolean(ban);
|
||||
if (ban) {
|
||||
out.writeString(reason);
|
||||
@ -269,13 +273,13 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
||||
@Override
|
||||
public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
|
||||
if (request.ban) {
|
||||
logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId, request
|
||||
.parentTaskId, clusterService.localNode().getId(), request.reason);
|
||||
taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
|
||||
logger.debug("Received ban for the parent [{}] on the node [{}], reason: [{}]", request.parentTaskId,
|
||||
clusterService.localNode().getId(), request.reason);
|
||||
taskManager.setBan(request.parentTaskId, request.reason);
|
||||
} else {
|
||||
logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
|
||||
logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId,
|
||||
clusterService.localNode().getId());
|
||||
taskManager.removeBan(request.parentNodeId, request.parentTaskId);
|
||||
taskManager.removeBan(request.parentTaskId);
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
|
@ -31,27 +31,35 @@ import java.io.IOException;
|
||||
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
|
||||
|
||||
private boolean detailed = false;
|
||||
private boolean waitForCompletion = false;
|
||||
|
||||
/**
|
||||
* Get information from nodes based on the nodes ids specified. If none are passed, information
|
||||
* for all nodes will be returned.
|
||||
* Should the detailed task information be returned.
|
||||
*/
|
||||
public ListTasksRequest(String... nodesIds) {
|
||||
super(nodesIds);
|
||||
public boolean getDetailed() {
|
||||
return this.detailed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the detailed task information be returned.
|
||||
*/
|
||||
public boolean detailed() {
|
||||
return this.detailed;
|
||||
public ListTasksRequest setDetailed(boolean detailed) {
|
||||
this.detailed = detailed;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the node settings be returned.
|
||||
* Should this request wait for all found tasks to complete?
|
||||
*/
|
||||
public ListTasksRequest detailed(boolean detailed) {
|
||||
this.detailed = detailed;
|
||||
public boolean getWaitForCompletion() {
|
||||
return waitForCompletion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should this request wait for all found tasks to complete?
|
||||
*/
|
||||
public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
|
||||
this.waitForCompletion = waitForCompletion;
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -59,11 +67,13 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
detailed = in.readBoolean();
|
||||
waitForCompletion = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(detailed);
|
||||
out.writeBoolean(waitForCompletion);
|
||||
}
|
||||
}
|
||||
|
@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksReques
* Should detailed task information be returned.
*/
public ListTasksRequestBuilder setDetailed(boolean detailed) {
request.detailed(detailed);
request.setDetailed(detailed);
return this;
}

/**
* Should this request wait for all found tasks to complete?
*/
public final ListTasksRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.setWaitForCompletion(waitForCompletion);
return this;
}
}
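
For context, a hedged sketch of how the new builder flags chain together; the prepareListTasks() helper on the cluster admin client is assumed here and is not shown in this diff:

    // Illustrative sketch; prepareListTasks() is an assumed client helper, not part of the change set.
    ListTasksResponse tasks = client.admin().cluster().prepareListTasks()
            .setDetailed(true)            // include per-task descriptions
            .setWaitForCompletion(true)   // poll until matched tasks finish (see TransportListTasksAction)
            .get();
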
@ -138,11 +138,13 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.startArray("tasks");
|
||||
builder.startObject("tasks");
|
||||
for(TaskInfo task : entry.getValue()) {
|
||||
builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
task.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endArray();
|
||||
builder.endObject();
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
|
@ -26,8 +26,10 @@ import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Information about a currently running task.
|
||||
@ -41,7 +43,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
|
||||
private final DiscoveryNode node;
|
||||
|
||||
private final long id;
|
||||
private final TaskId taskId;
|
||||
|
||||
private final String type;
|
||||
|
||||
@ -49,30 +51,30 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
|
||||
private final String description;
|
||||
|
||||
private final long startTime;
|
||||
|
||||
private final long runningTimeNanos;
|
||||
|
||||
private final Task.Status status;
|
||||
|
||||
private final String parentNode;
|
||||
private final TaskId parentTaskId;
|
||||
|
||||
private final long parentId;
|
||||
|
||||
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
|
||||
this(node, id, type, action, description, status, null, -1L);
|
||||
}
|
||||
|
||||
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
|
||||
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, long startTime,
|
||||
long runningTimeNanos, TaskId parentTaskId) {
|
||||
this.node = node;
|
||||
this.id = id;
|
||||
this.taskId = new TaskId(node.getId(), id);
|
||||
this.type = type;
|
||||
this.action = action;
|
||||
this.description = description;
|
||||
this.status = status;
|
||||
this.parentNode = parentNode;
|
||||
this.parentId = parentId;
|
||||
this.startTime = startTime;
|
||||
this.runningTimeNanos = runningTimeNanos;
|
||||
this.parentTaskId = parentTaskId;
|
||||
}
|
||||
|
||||
public TaskInfo(StreamInput in) throws IOException {
|
||||
node = DiscoveryNode.readNode(in);
|
||||
id = in.readLong();
|
||||
taskId = new TaskId(node.getId(), in.readLong());
|
||||
type = in.readString();
|
||||
action = in.readString();
|
||||
description = in.readOptionalString();
|
||||
@ -81,8 +83,13 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
} else {
|
||||
status = null;
|
||||
}
|
||||
parentNode = in.readOptionalString();
|
||||
parentId = in.readLong();
|
||||
startTime = in.readLong();
|
||||
runningTimeNanos = in.readLong();
|
||||
parentTaskId = new TaskId(in);
|
||||
}
|
||||
|
||||
public TaskId getTaskId() {
|
||||
return taskId;
|
||||
}
|
||||
|
||||
public DiscoveryNode getNode() {
|
||||
@ -90,7 +97,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return id;
|
||||
return taskId.getId();
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
@ -113,12 +120,25 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
return status;
|
||||
}
|
||||
|
||||
public String getParentNode() {
|
||||
return parentNode;
|
||||
/**
|
||||
* Returns the task start time
|
||||
*/
|
||||
public long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public long getParentId() {
|
||||
return parentId;
|
||||
/**
|
||||
* Returns the task running time
|
||||
*/
|
||||
public long getRunningTimeNanos() {
|
||||
return runningTimeNanos;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the parent task id
|
||||
*/
|
||||
public TaskId getParentTaskId() {
|
||||
return parentTaskId;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -129,7 +149,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
node.writeTo(out);
|
||||
out.writeLong(id);
|
||||
out.writeLong(taskId.getId());
|
||||
out.writeString(type);
|
||||
out.writeString(action);
|
||||
out.writeOptionalString(description);
|
||||
@ -139,15 +159,15 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeOptionalString(parentNode);
|
||||
out.writeLong(parentId);
|
||||
out.writeLong(startTime);
|
||||
out.writeLong(runningTimeNanos);
|
||||
parentTaskId.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field("node", node.getId());
|
||||
builder.field("id", id);
|
||||
builder.field("id", taskId.getId());
|
||||
builder.field("type", type);
|
||||
builder.field("action", action);
|
||||
if (status != null) {
|
||||
@ -156,11 +176,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
|
||||
if (description != null) {
|
||||
builder.field("description", description);
|
||||
}
|
||||
if (parentNode != null) {
|
||||
builder.field("parent_node", parentNode);
|
||||
builder.field("parent_id", parentId);
|
||||
builder.dateValueField("start_time_in_millis", "start_time", startTime);
|
||||
builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS);
|
||||
if (parentTaskId.isSet() == false) {
|
||||
builder.field("parent_task_id", parentTaskId.toString());
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,8 @@
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.tasks.list;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.action.TaskOperationFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
@ -29,18 +31,24 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
|
||||
private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
|
||||
private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);
|
||||
|
||||
@Inject
|
||||
public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
|
||||
|
||||
@Override
|
||||
protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
|
||||
return task.taskInfo(clusterService.localNode(), request.detailed());
|
||||
return task.taskInfo(clusterService.localNode(), request.getDetailed());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
|
||||
if (false == request.getWaitForCompletion()) {
|
||||
super.processTasks(request, operation);
|
||||
return;
|
||||
}
|
||||
// If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
|
||||
TimeValue timeout = request.getTimeout();
|
||||
if (timeout == null) {
|
||||
timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
|
||||
}
|
||||
long timeoutTime = System.nanoTime() + timeout.nanos();
|
||||
super.processTasks(request, operation.andThen((Task t) -> {
|
||||
while (System.nanoTime() - timeoutTime < 0) {
|
||||
if (taskManager.getTask(t.getId()) == null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
|
||||
} catch (InterruptedException e) {
|
||||
throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
|
||||
}
|
||||
}
|
||||
throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
|
||||
}));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -98,7 +98,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true);
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {
@ -27,11 +27,14 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;

/**
 */
public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction<PendingClusterTasksRequest, PendingClusterTasksResponse> {
@ -63,6 +66,9 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadA

@Override
protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) {
    listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
    logger.trace("fetching pending tasks from cluster service");
    final List<PendingClusterTask> pendingTasks = clusterService.pendingTasks();
    logger.trace("done fetching pending tasks from cluster service");
    listener.onResponse(new PendingClusterTasksResponse(pendingTasks));
}
}
@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc

@Override
protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
    IndexService service = indicesService.indexService(shardRouting.getIndexName());
    IndexService service = indicesService.indexService(shardRouting.index());
    if (service != null) {
        IndexShard shard = service.getShardOrNull(shardRouting.id());
        boolean clearedAtLeastOne = false;
@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi

@Override
protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
    IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
    IndexService indexService = indicesService.indexServiceSafe(shardRouting.index());
    IndexShard indexShard = indexService.getShard(shardRouting.id());
    return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
}
@ -166,7 +166,7 @@ public class CommonStats implements Streamable, ToXContent {
    completion = indexShard.completionStats(flags.completionDataFields());
    break;
case Segments:
    segments = indexShard.segmentStats();
    segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
    break;
case Percolate:
    percolate = indexShard.percolateStats();
@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@ -38,6 +39,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
private String[] groups = null;
private String[] fieldDataFields = null;
private String[] completionDataFields = null;
private boolean includeSegmentFileSizes = false;

/**
@ -62,6 +64,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    groups = null;
    fieldDataFields = null;
    completionDataFields = null;
    includeSegmentFileSizes = false;
    return this;
}

@ -74,6 +77,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    groups = null;
    fieldDataFields = null;
    completionDataFields = null;
    includeSegmentFileSizes = false;
    return this;
}

@ -137,6 +141,15 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    return this.completionDataFields;
}

public CommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes) {
    this.includeSegmentFileSizes = includeSegmentFileSizes;
    return this;
}

public boolean includeSegmentFileSizes() {
    return this.includeSegmentFileSizes;
}

public boolean isSet(Flag flag) {
    return flags.contains(flag);
}
@ -177,6 +190,9 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    out.writeStringArrayNullable(groups);
    out.writeStringArrayNullable(fieldDataFields);
    out.writeStringArrayNullable(completionDataFields);
    if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
        out.writeBoolean(includeSegmentFileSizes);
    }
}

@Override
@ -192,6 +208,11 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    groups = in.readStringArray();
    fieldDataFields = in.readStringArray();
    completionDataFields = in.readStringArray();
    if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
        includeSegmentFileSizes = in.readBoolean();
    } else {
        includeSegmentFileSizes = false;
    }
}

@Override
@ -265,6 +265,15 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
    return flags.isSet(Flag.Recovery);
}

public boolean includeSegmentFileSizes() {
    return flags.includeSegmentFileSizes();
}

public IndicesStatsRequest includeSegmentFileSizes(boolean includeSegmentFileSizes) {
    flags.includeSegmentFileSizes(includeSegmentFileSizes);
    return this;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
@ -166,4 +166,9 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
    request.recovery(recovery);
    return this;
}

public IndicesStatsRequestBuilder setIncludeSegmentFileSizes(boolean includeSegmentFileSizes) {
    request.includeSegmentFileSizes(includeSegmentFileSizes);
    return this;
}
}
@ -144,6 +144,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
}
if (request.segments()) {
    flags.set(CommonStatsFlags.Flag.Segments);
    flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
}
if (request.completion()) {
    flags.set(CommonStatsFlags.Flag.Completion);
@ -49,6 +49,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
@ -76,17 +77,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid

private final BigArrays bigArrays;

private final FetchPhase fetchPhase;

@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
        TransportService transportService, IndicesService indicesService,
        ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
        BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        TransportService transportService, IndicesService indicesService, ScriptService scriptService,
        PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
        IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
    super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
            indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
    this.indicesService = indicesService;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.fetchPhase = fetchPhase;
}

@Override
@ -173,11 +177,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");

DefaultSearchContext searchContext = new DefaultSearchContext(0,
        new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()),
        null, searcher, indexService, indexShard,
        scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
        SearchService.NO_TIMEOUT
);
        new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
        indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(),
        parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
SearchContext.setCurrent(searchContext);
try {
    searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
@ -28,7 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.StatusToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
@ -76,7 +78,15 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
|
||||
/**
|
||||
* Represents a failure.
|
||||
*/
|
||||
public static class Failure {
|
||||
public static class Failure implements Writeable<Failure>, ToXContent {
|
||||
static final String INDEX_FIELD = "index";
|
||||
static final String TYPE_FIELD = "type";
|
||||
static final String ID_FIELD = "id";
|
||||
static final String CAUSE_FIELD = "cause";
|
||||
static final String STATUS_FIELD = "status";
|
||||
|
||||
public static final Failure PROTOTYPE = new Failure(null, null, null, null);
|
||||
|
||||
private final String index;
|
||||
private final String type;
|
||||
private final String id;
|
||||
@ -126,9 +136,39 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
|
||||
return this.status;
|
||||
}
|
||||
|
||||
/**
|
||||
* The actual cause of the failure.
|
||||
*/
|
||||
public Throwable getCause() {
|
||||
return cause;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Failure readFrom(StreamInput in) throws IOException {
|
||||
return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(getIndex());
|
||||
out.writeString(getType());
|
||||
out.writeOptionalString(getId());
|
||||
out.writeThrowable(getCause());
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(INDEX_FIELD, index);
|
||||
builder.field(TYPE_FIELD, type);
|
||||
if (id != null) {
|
||||
builder.field(ID_FIELD, id);
|
||||
}
|
||||
builder.startObject(CAUSE_FIELD);
|
||||
ElasticsearchException.toXContent(builder, params, cause);
|
||||
builder.endObject();
|
||||
builder.field(STATUS_FIELD, status.getStatus());
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
||||
private int id;
|
||||
@ -265,11 +305,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
|
||||
}
|
||||
|
||||
if (in.readBoolean()) {
|
||||
String fIndex = in.readString();
|
||||
String fType = in.readString();
|
||||
String fId = in.readOptionalString();
|
||||
Throwable throwable = in.readThrowable();
|
||||
failure = new Failure(fIndex, fType, fId, throwable);
|
||||
failure = Failure.PROTOTYPE.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
@ -294,10 +330,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
out.writeString(failure.getIndex());
|
||||
out.writeString(failure.getType());
|
||||
out.writeOptionalString(failure.getId());
|
||||
out.writeThrowable(failure.getCause());
|
||||
failure.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -35,31 +35,53 @@ import java.util.Iterator;
|
||||
*/
|
||||
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {
|
||||
|
||||
public final static long NO_INGEST_TOOK = -1L;
|
||||
|
||||
private BulkItemResponse[] responses;
|
||||
private long tookInMillis;
|
||||
private long ingestTookInMillis;
|
||||
|
||||
BulkResponse() {
|
||||
}
|
||||
|
||||
public BulkResponse(BulkItemResponse[] responses, long tookInMillis) {
|
||||
this(responses, tookInMillis, NO_INGEST_TOOK);
|
||||
}
|
||||
|
||||
public BulkResponse(BulkItemResponse[] responses, long tookInMillis, long ingestTookInMillis) {
|
||||
this.responses = responses;
|
||||
this.tookInMillis = tookInMillis;
|
||||
this.ingestTookInMillis = ingestTookInMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* How long the bulk execution took.
|
||||
* How long the bulk execution took, excluding ingest preprocessing.
|
||||
*/
|
||||
public TimeValue getTook() {
|
||||
return new TimeValue(tookInMillis);
|
||||
}
|
||||
|
||||
/**
|
||||
* How long the bulk execution took in milliseconds.
|
||||
* How long the bulk execution took in milliseconds, excluding ingest preprocessing.
|
||||
*/
|
||||
public long getTookInMillis() {
|
||||
return tookInMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* If ingest is enabled returns the bulk ingest preprocessing time, otherwise 0 is returned.
|
||||
*/
|
||||
public TimeValue getIngestTook() {
|
||||
return new TimeValue(ingestTookInMillis);
|
||||
}
|
||||
|
||||
/**
|
||||
* If ingest is enabled returns the bulk ingest preprocessing time in milliseconds, otherwise -1 is returned.
|
||||
*/
|
||||
public long getIngestTookInMillis() {
|
||||
return ingestTookInMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* Has anything failed with the execution.
|
||||
*/
|
||||
@ -106,6 +128,7 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
|
||||
responses[i] = BulkItemResponse.readBulkItem(in);
|
||||
}
|
||||
tookInMillis = in.readVLong();
|
||||
ingestTookInMillis = in.readZLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -116,5 +139,6 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
|
||||
response.writeTo(out);
|
||||
}
|
||||
out.writeVLong(tookInMillis);
|
||||
out.writeZLong(ingestTookInMillis);
|
||||
}
|
||||
}
|
||||
|
@ -94,6 +94,12 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {

@Override
public String toString() {
    return "shard bulk {" + super.toString() + "}";
    // This is included in error messages so we'll try to make it somewhat user friendly.
    StringBuilder b = new StringBuilder("BulkShardRequest to [");
    b.append(index).append("] containing [").append(items.length).append("] requests");
    if (refresh) {
        b.append(" and a refresh");
    }
    return b.toString();
}
}
@ -38,7 +38,7 @@ import java.util.function.Predicate;
/**
 * Encapsulates synchronous and asynchronous retry logic.
 */
class Retry {
public class Retry {
private final Class<? extends Throwable> retryOnThrowable;

private BackoffPolicy backoffPolicy;
@ -30,10 +30,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.delete.TransportDeleteAction;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.AutoCreateIndex;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.update.TransportUpdateAction;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
@ -42,12 +44,9 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndexAlreadyExistsException;
|
||||
@ -61,8 +60,11 @@ import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.LongSupplier;
|
||||
|
||||
/**
|
||||
*
|
||||
@ -74,27 +76,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
private final ClusterService clusterService;
|
||||
private final TransportShardBulkAction shardBulkAction;
|
||||
private final TransportCreateIndexAction createIndexAction;
|
||||
private final LongSupplier relativeTimeProvider;
|
||||
|
||||
@Inject
|
||||
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
|
||||
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex) {
|
||||
this(settings, threadPool, transportService, clusterService,
|
||||
shardBulkAction, createIndexAction,
|
||||
actionFilters, indexNameExpressionResolver,
|
||||
autoCreateIndex,
|
||||
System::nanoTime);
|
||||
}
|
||||
|
||||
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
|
||||
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
|
||||
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
|
||||
Objects.requireNonNull(relativeTimeProvider);
|
||||
this.clusterService = clusterService;
|
||||
this.shardBulkAction = shardBulkAction;
|
||||
this.createIndexAction = createIndexAction;
|
||||
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
|
||||
this.relativeTimeProvider = relativeTimeProvider;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
|
||||
final long startTime = System.currentTimeMillis();
|
||||
final long startTime = relativeTime();
|
||||
final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
|
||||
|
||||
if (autoCreateIndex.needToCheck()) {
|
||||
if (needToCheck()) {
|
||||
// Keep track of all unique indices and all unique types per index for the create index requests:
|
||||
final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
|
||||
for (ActionRequest request : bulkRequest.requests) {
|
||||
@ -113,7 +129,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
ClusterState state = clusterService.state();
|
||||
for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
|
||||
final String index = entry.getKey();
|
||||
if (autoCreateIndex.shouldAutoCreate(index, state)) {
|
||||
if (shouldAutoCreate(index, state)) {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
|
||||
createIndexRequest.index(index);
|
||||
for (String type : entry.getValue()) {
|
||||
@ -164,6 +180,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
}
|
||||
}
|
||||
|
||||
boolean needToCheck() {
|
||||
return autoCreateIndex.needToCheck();
|
||||
}
|
||||
|
||||
boolean shouldAutoCreate(String index, ClusterState state) {
|
||||
return autoCreateIndex.shouldAutoCreate(index, state);
|
||||
}
|
||||
|
||||
private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
@ -196,16 +220,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
* @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
|
||||
*/
|
||||
public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
|
||||
final long startTime = System.currentTimeMillis();
|
||||
executeBulk(bulkRequest, startTime, listener, new AtomicArray<BulkItemResponse>(bulkRequest.requests.size()));
|
||||
final long startTimeNanos = relativeTime();
|
||||
executeBulk(bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size()));
|
||||
}
|
||||
|
||||
private final long buildTookInMillis(long startTime) {
|
||||
// protect ourselves against time going backwards
|
||||
return Math.max(1, System.currentTimeMillis() - startTime);
|
||||
private long buildTookInMillis(long startTimeNanos) {
|
||||
return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
|
||||
}
|
||||
|
||||
private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
|
||||
void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
|
||||
final ClusterState clusterState = clusterService.state();
|
||||
// TODO use timeout to wait here if its blocked...
|
||||
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
|
||||
@ -214,33 +237,53 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
MetaData metaData = clusterState.metaData();
|
||||
for (int i = 0; i < bulkRequest.requests.size(); i++) {
|
||||
ActionRequest request = bulkRequest.requests.get(i);
|
||||
if (request instanceof DocumentRequest) {
|
||||
DocumentRequest req = (DocumentRequest) request;
|
||||
|
||||
if (addFailureIfIndexIsUnavailable(req, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
//the request can only be null because we set it to null in the previous step, so it gets ignored
|
||||
if (request == null) {
|
||||
continue;
|
||||
}
|
||||
DocumentRequest documentRequest = (DocumentRequest) request;
|
||||
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
}
|
||||
String concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
MappingMetaData mappingMd = null;
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
|
||||
}
|
||||
try {
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
|
||||
} catch (ElasticsearchParseException | RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request never gets processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else if (request instanceof DeleteRequest) {
|
||||
try {
|
||||
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex, (DeleteRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request never gets processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
|
||||
String concreteIndex = concreteIndices.resolveIfAbsent(req);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
MappingMetaData mappingMd = null;
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
|
||||
}
|
||||
try {
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
|
||||
} catch (ElasticsearchParseException | RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request never gets processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else {
|
||||
concreteIndices.resolveIfAbsent(req);
|
||||
req.routing(clusterState.metaData().resolveIndexRouting(req.parent(), req.routing(), req.index()));
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
try {
|
||||
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex, (UpdateRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request never gets processed again
|
||||
bulkRequest.requests.set(i, null);
|
||||
}
|
||||
} else {
|
||||
throw new AssertionError("request type not supported: [" + request.getClass().getName() + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@ -262,37 +305,16 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
} else if (request instanceof DeleteRequest) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
|
||||
MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(deleteRequest.type());
|
||||
if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
|
||||
// if routing is required, and no routing on the delete request, we need to broadcast it....
|
||||
GroupShardsIterator groupShards = clusterService.operationRouting().broadcastDeleteShards(clusterState, concreteIndex);
|
||||
for (ShardIterator shardIt : groupShards) {
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardIt.shardId(), list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, deleteRequest));
|
||||
}
|
||||
} else {
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardId, list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, request));
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
list = new ArrayList<>();
|
||||
requestsByShard.put(shardId, list);
|
||||
}
|
||||
list.add(new BulkItemRequest(i, request));
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
|
||||
MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(updateRequest.type());
|
||||
if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(),
|
||||
updateRequest.id(), new IllegalArgumentException("routing is required for this item"));
|
||||
responses.set(i, new BulkItemResponse(i, updateRequest.type(), failure));
|
||||
continue;
|
||||
}
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
@ -304,7 +326,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
}
|
||||
|
||||
if (requestsByShard.isEmpty()) {
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -354,7 +376,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
|
||||
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
|
||||
}
|
||||
});
|
||||
}
|
||||
@ -400,7 +422,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
private static class ConcreteIndices {
|
||||
private final ClusterState state;
|
||||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
@ -424,4 +445,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
||||
return concreteIndex;
|
||||
}
|
||||
}
|
||||
|
||||
private long relativeTime() {
|
||||
return relativeTimeProvider.getAsLong();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -30,6 +30,7 @@ import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.index.TransportIndexAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.replication.ReplicationRequest;
|
||||
import org.elasticsearch.action.support.replication.TransportReplicationAction;
|
||||
import org.elasticsearch.action.update.UpdateHelper;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
@ -46,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
@ -103,202 +105,16 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
||||
|
||||
@Override
|
||||
protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
|
||||
final IndexService indexService = indicesService.indexServiceSafe(request.index());
|
||||
final IndexShard indexShard = indexService.getShard(request.shardId().id());
|
||||
ShardId shardId = request.shardId();
|
||||
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
final IndexShard indexShard = indexService.getShard(shardId.getId());
|
||||
|
||||
long[] preVersions = new long[request.items().length];
|
||||
VersionType[] preVersionTypes = new VersionType[request.items().length];
|
||||
Translog.Location location = null;
|
||||
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
|
||||
BulkItemRequest item = request.items()[requestIndex];
|
||||
if (item.request() instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) item.request();
|
||||
preVersions[requestIndex] = indexRequest.version();
|
||||
preVersionTypes[requestIndex] = indexRequest.versionType();
|
||||
try {
|
||||
WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
|
||||
location = locationToSync(location, result.location);
|
||||
// add the response
|
||||
IndexResponse indexResponse = result.response();
|
||||
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
|
||||
} catch (Throwable e) {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// if we have a pending mapping update
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(e)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) e;
|
||||
}
|
||||
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
|
||||
logger.trace("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
|
||||
} else {
|
||||
logger.debug("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
|
||||
}
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(e)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else {
|
||||
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
|
||||
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
|
||||
}
|
||||
}
|
||||
} else if (item.request() instanceof DeleteRequest) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) item.request();
|
||||
preVersions[requestIndex] = deleteRequest.version();
|
||||
preVersionTypes[requestIndex] = deleteRequest.versionType();
|
||||
|
||||
try {
|
||||
// add the response
|
||||
final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
|
||||
DeleteResponse deleteResponse = writeResult.response();
|
||||
location = locationToSync(location, writeResult.location);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
|
||||
} catch (Throwable e) {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(e)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) e;
|
||||
}
|
||||
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
|
||||
logger.trace("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
|
||||
} else {
|
||||
logger.debug("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
|
||||
}
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(e)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
|
||||
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
|
||||
}
|
||||
}
|
||||
} else if (item.request() instanceof UpdateRequest) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) item.request();
|
||||
preVersions[requestIndex] = updateRequest.version();
|
||||
preVersionTypes[requestIndex] = updateRequest.versionType();
|
||||
// We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
|
||||
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
|
||||
UpdateResult updateResult;
|
||||
try {
|
||||
updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
|
||||
} catch (Throwable t) {
|
||||
updateResult = new UpdateResult(null, null, false, t, null);
|
||||
}
|
||||
if (updateResult.success()) {
|
||||
if (updateResult.writeResult != null) {
|
||||
location = locationToSync(location, updateResult.writeResult.location);
|
||||
}
|
||||
switch (updateResult.result.operation()) {
|
||||
case UPSERT:
|
||||
case INDEX:
|
||||
WriteResult<IndexResponse> result = updateResult.writeResult;
|
||||
IndexRequest indexRequest = updateResult.request();
|
||||
BytesReference indexSourceAsBytes = indexRequest.source();
|
||||
// add the response
|
||||
IndexResponse indexResponse = result.response();
|
||||
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(),
|
||||
indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(),indexResponse.getVersion(), indexResponse.isCreated());
|
||||
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
|
||||
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
|
||||
}
|
||||
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
|
||||
break;
|
||||
case DELETE:
|
||||
WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
|
||||
DeleteResponse response = writeResult.response();
|
||||
DeleteRequest deleteRequest = updateResult.request();
|
||||
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(),
|
||||
response.getId(), response.getSeqNo(), response.getVersion(), false);
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
|
||||
// Replace the update request to the translated delete request to execute on the replica.
|
||||
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
|
||||
break;
|
||||
case NONE:
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
|
||||
item.setIgnoreOnReplica(); // no need to go to the replica
|
||||
break;
|
||||
}
|
||||
// NOTE: Breaking out of the retry_on_conflict loop!
|
||||
break;
|
||||
} else if (updateResult.failure()) {
|
||||
Throwable t = updateResult.error;
|
||||
if (updateResult.retry) {
|
||||
// updateAttemptCount is 0 based and marks current attempt, if it's equal to retryOnConflict we are going out of the iteration
|
||||
if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
|
||||
new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
|
||||
}
|
||||
} else {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(t)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) t;
|
||||
}
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(t)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else if (updateResult.result == null) {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
|
||||
} else {
|
||||
switch (updateResult.result.operation()) {
|
||||
case UPSERT:
|
||||
case INDEX:
|
||||
IndexRequest indexRequest = updateResult.request();
|
||||
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
|
||||
logger.trace("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
|
||||
} else {
|
||||
logger.debug("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
|
||||
}
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
|
||||
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
|
||||
break;
|
||||
case DELETE:
|
||||
DeleteRequest deleteRequest = updateResult.request();
|
||||
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
|
||||
logger.trace("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
|
||||
} else {
|
||||
logger.debug("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
|
||||
}
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
|
||||
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
// NOTE: Breaking out of the retry_on_conflict loop!
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw new IllegalStateException("Unexpected index operation: " + item.request());
|
||||
}
|
||||
|
||||
assert item.getPrimaryResponse() != null;
|
||||
assert preVersionTypes[requestIndex] != null;
|
||||
location = handleItem(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
|
||||
}
|
||||
|
||||
processAfterWrite(request.refresh(), indexShard, location);
|
||||
@ -310,6 +126,204 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
||||
return new Tuple<>(new BulkShardResponse(request.shardId(), responses), request);
|
||||
}
|
||||
|
||||
private Translog.Location handleItem(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
|
||||
if (item.request() instanceof IndexRequest) {
|
||||
location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
|
||||
} else if (item.request() instanceof DeleteRequest) {
|
||||
location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
|
||||
} else if (item.request() instanceof UpdateRequest) {
|
||||
Tuple<Translog.Location, BulkItemRequest> tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
|
||||
location = tuple.v1();
|
||||
item = tuple.v2();
|
||||
} else {
|
||||
throw new IllegalStateException("Unexpected index operation: " + item.request());
|
||||
}
|
||||
|
||||
assert item.getPrimaryResponse() != null;
|
||||
assert preVersionTypes[requestIndex] != null;
|
||||
return location;
|
||||
}
|
||||
|
||||
private Translog.Location index(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
|
||||
IndexRequest indexRequest = (IndexRequest) item.request();
|
||||
preVersions[requestIndex] = indexRequest.version();
|
||||
preVersionTypes[requestIndex] = indexRequest.versionType();
|
||||
try {
|
||||
WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
|
||||
location = locationToSync(location, result.location);
|
||||
// add the response
|
||||
IndexResponse indexResponse = result.response();
|
||||
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
|
||||
} catch (Throwable e) {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(e)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) e;
|
||||
}
|
||||
logFailure(e, "index", request.shardId(), indexRequest);
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(e)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else {
|
||||
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
|
||||
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
|
||||
}
|
||||
}
|
||||
return location;
|
||||
}
|
||||
|
||||
private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable e, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
|
||||
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
|
||||
logger.trace("{} failed to execute bulk item ({}) {}", e, shardId, operation, request);
|
||||
} else {
|
||||
logger.debug("{} failed to execute bulk item ({}) {}", e, shardId, operation, request);
|
||||
}
|
||||
}
|
||||
|
||||
private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) item.request();
|
||||
preVersions[requestIndex] = deleteRequest.version();
|
||||
preVersionTypes[requestIndex] = deleteRequest.versionType();
|
||||
|
||||
try {
|
||||
// add the response
|
||||
final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
|
||||
DeleteResponse deleteResponse = writeResult.response();
|
||||
location = locationToSync(location, writeResult.location);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
|
||||
} catch (Throwable e) {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(e)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) e;
|
||||
}
|
||||
logFailure(e, "delete", request.shardId(), deleteRequest);
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(e)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
|
||||
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
|
||||
}
|
||||
}
|
||||
return location;
|
||||
}
|
||||
|
||||
private Tuple<Translog.Location, BulkItemRequest> update(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) item.request();
|
||||
preVersions[requestIndex] = updateRequest.version();
|
||||
preVersionTypes[requestIndex] = updateRequest.versionType();
|
||||
// We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
|
||||
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
|
||||
UpdateResult updateResult;
|
||||
try {
|
||||
updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
|
||||
} catch (Throwable t) {
|
||||
updateResult = new UpdateResult(null, null, false, t, null);
|
||||
}
|
||||
if (updateResult.success()) {
|
||||
if (updateResult.writeResult != null) {
|
||||
location = locationToSync(location, updateResult.writeResult.location);
|
||||
}
|
||||
switch (updateResult.result.operation()) {
|
||||
case UPSERT:
|
||||
case INDEX:
|
||||
WriteResult<IndexResponse> result = updateResult.writeResult;
|
||||
IndexRequest indexRequest = updateResult.request();
|
||||
BytesReference indexSourceAsBytes = indexRequest.source();
|
||||
// add the response
|
||||
IndexResponse indexResponse = result.response();
|
||||
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getVersion(), indexResponse.isCreated());
|
||||
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
|
||||
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
|
||||
}
|
||||
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
|
||||
break;
|
||||
case DELETE:
|
||||
WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
|
||||
DeleteResponse response = writeResult.response();
|
||||
DeleteRequest deleteRequest = updateResult.request();
|
||||
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), false);
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
|
||||
// Replace the update request to the translated delete request to execute on the replica.
|
||||
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
|
||||
break;
|
||||
case NONE:
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
|
||||
item.setIgnoreOnReplica(); // no need to go to the replica
|
||||
break;
|
||||
}
|
||||
// NOTE: Breaking out of the retry_on_conflict loop!
|
||||
break;
|
||||
} else if (updateResult.failure()) {
|
||||
Throwable t = updateResult.error;
|
||||
if (updateResult.retry) {
|
||||
// updateAttemptCount is 0 based and marks current attempt, if it's equal to retryOnConflict we are going out of the iteration
|
||||
if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
|
||||
new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
|
||||
}
|
||||
} else {
|
||||
// nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed.
|
||||
// some operations were already performed and have a seqno assigned. we shouldn't just reindex them
|
||||
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
|
||||
if (retryPrimaryException(t)) {
|
||||
// restore updated versions...
|
||||
for (int j = 0; j < requestIndex; j++) {
|
||||
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
|
||||
}
|
||||
throw (ElasticsearchException) t;
|
||||
}
|
||||
// if its a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the successful execution
|
||||
if (item.getPrimaryResponse() != null && isConflictException(t)) {
|
||||
setResponse(item, item.getPrimaryResponse());
|
||||
} else if (updateResult.result == null) {
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
|
||||
} else {
|
||||
switch (updateResult.result.operation()) {
|
||||
case UPSERT:
|
||||
case INDEX:
|
||||
IndexRequest indexRequest = updateResult.request();
|
||||
logFailure(t, "index", request.shardId(), indexRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
|
||||
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
|
||||
break;
|
||||
case DELETE:
|
||||
DeleteRequest deleteRequest = updateResult.request();
|
||||
logFailure(t, "delete", request.shardId(), deleteRequest);
|
||||
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
|
||||
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
// NOTE: Breaking out of the retry_on_conflict loop!
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return Tuple.tuple(location, item);
|
||||
}
|
||||
|
||||
private void setResponse(BulkItemRequest request, BulkItemResponse response) {
|
||||
request.setPrimaryResponse(response);
|
||||
if (response.isFailed()) {
|
||||
@ -317,7 +331,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
||||
}
|
||||
}
|
||||
|
||||
private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, MetaData metaData,
|
||||
private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, MetaData metaData,
|
||||
IndexShard indexShard, boolean processed) throws Throwable {
|
||||
|
||||
// validate, if routing is required, that we got routing
|
||||
@ -493,4 +507,4 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
||||
assert current == null || current.compareTo(next) < 0 : "translog locations are not increasing";
|
||||
return next;
|
||||
}
|
||||
}
|
||||
}
|
@ -96,23 +96,27 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
|
||||
|
||||
@Override
|
||||
protected void resolveRequest(final MetaData metaData, String concreteIndex, DeleteRequest request) {
|
||||
resolveAndValidateRouting(metaData, concreteIndex, request);
|
||||
ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
|
||||
request.setShardId(shardId);
|
||||
}
|
||||
|
||||
public static void resolveAndValidateRouting(final MetaData metaData, String concreteIndex, DeleteRequest request) {
|
||||
request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index()));
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
// check if routing is required, if so, do a broadcast delete
|
||||
// check if routing is required, if so, throw error if routing wasn't specified
|
||||
MappingMetaData mappingMd = metaData.index(concreteIndex).mappingOrDefault(request.type());
|
||||
if (mappingMd != null && mappingMd.routing().required()) {
|
||||
if (request.routing() == null) {
|
||||
if (request.versionType() != VersionType.INTERNAL) {
|
||||
// TODO: implement this feature
|
||||
throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.type()
|
||||
+ "] while using version_type [" + request.versionType() + "]");
|
||||
+ "] while using version_type [" + request.versionType() + "]");
|
||||
}
|
||||
throw new RoutingMissingException(concreteIndex, request.type(), request.id());
|
||||
}
|
||||
}
|
||||
}
|
||||
ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
|
||||
request.setShardId(shardId);
|
||||
}
|
||||
|
||||
private void innerExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
|
||||
|
@ -44,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.fetch.FetchPhase;
|
||||
import org.elasticsearch.search.internal.DefaultSearchContext;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
|
||||
@ -68,17 +69,20 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
|
||||
|
||||
private final BigArrays bigArrays;
|
||||
|
||||
private final FetchPhase fetchPhase;
|
||||
|
||||
@Inject
|
||||
public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, IndicesService indicesService,
|
||||
ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
|
||||
BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
TransportService transportService, IndicesService indicesService, ScriptService scriptService,
|
||||
PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
|
||||
super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
|
||||
ExplainRequest::new, ThreadPool.Names.GET);
|
||||
this.indicesService = indicesService;
|
||||
this.scriptService = scriptService;
|
||||
this.pageCacheRecycler = pageCacheRecycler;
|
||||
this.bigArrays = bigArrays;
|
||||
this.fetchPhase = fetchPhase;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -111,13 +115,10 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
|
||||
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
|
||||
}
|
||||
|
||||
SearchContext context = new DefaultSearchContext(
|
||||
0, new ShardSearchLocalRequest(new String[]{request.type()}, request.nowInMillis, request.filteringAlias()),
|
||||
null, result.searcher(), indexService, indexShard,
|
||||
scriptService, pageCacheRecycler,
|
||||
bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
|
||||
SearchService.NO_TIMEOUT
|
||||
);
|
||||
SearchContext context = new DefaultSearchContext(0,
|
||||
new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
|
||||
result.searcher(), indexService, indexShard, scriptService, pageCacheRecycler, bigArrays,
|
||||
threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
|
||||
SearchContext.setCurrent(context);
|
||||
|
||||
try {
|
||||
|
@ -62,7 +62,7 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
|
||||
|
||||
public void setIndexConstraints(IndexConstraint[] indexConstraints) {
|
||||
if (indexConstraints == null) {
|
||||
throw new NullPointerException("specified index_contraints can't be null");
|
||||
throw new NullPointerException("specified index_constraints can't be null");
|
||||
}
|
||||
this.indexConstraints = indexConstraints;
|
||||
}
|
||||
|
@ -215,9 +215,21 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
validationException = addValidationError("ttl must not be negative", validationException);
|
||||
}
|
||||
}
|
||||
|
||||
if (id != null && id.getBytes(StandardCharsets.UTF_8).length > 512) {
|
||||
validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
|
||||
id.getBytes(StandardCharsets.UTF_8).length, validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
/**
|
||||
* The content type that will be used when generating a document from user provided objects like Maps.
|
||||
*/
|
||||
public XContentType getContentType() {
|
||||
return contentType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the content type that will be used when generating a document from user provided objects (like Map).
|
||||
*/
|
||||
@ -289,6 +301,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String parent() {
|
||||
return this.parent;
|
||||
}
|
||||
@ -626,6 +639,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
if (defaultTimestamp.equals(TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP)) {
|
||||
timestamp = Long.toString(System.currentTimeMillis());
|
||||
} else {
|
||||
// if we are here, the defaultTimestamp is not
|
||||
// TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP but
|
||||
// this can only happen if defaultTimestamp was
|
||||
// assigned again because mappingMd and
|
||||
// mappingMd#timestamp() are not null
|
||||
assert mappingMd != null;
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
|
||||
}
|
||||
}
|
||||
@ -634,7 +653,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
type = in.readString();
|
||||
type = in.readOptionalString();
|
||||
id = in.readOptionalString();
|
||||
routing = in.readOptionalString();
|
||||
parent = in.readOptionalString();
|
||||
@ -652,7 +671,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(type);
|
||||
out.writeOptionalString(type);
|
||||
out.writeOptionalString(id);
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(parent);
|
||||
|
@ -43,6 +43,7 @@ import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
public final class IngestActionFilter extends AbstractComponent implements ActionFilter {
|
||||
|
||||
@ -101,6 +102,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
|
||||
}
|
||||
|
||||
void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
|
||||
long ingestStartTimeInNanos = System.nanoTime();
|
||||
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
|
||||
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> {
|
||||
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", throwable, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
|
||||
@ -110,8 +112,9 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
|
||||
logger.error("failed to execute pipeline for a bulk request", throwable);
|
||||
listener.onFailure(throwable);
|
||||
} else {
|
||||
long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
|
||||
BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
|
||||
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(listener);
|
||||
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
|
||||
if (bulkRequest.requests().isEmpty()) {
|
||||
// at this stage, the transport bulk action can't deal with a bulk request with no requests,
|
||||
// so we stop and send an empty response back to the client.
|
||||
@ -176,11 +179,21 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
|
||||
}
|
||||
}
|
||||
|
||||
ActionListener<BulkResponse> wrapActionListenerIfNeeded(ActionListener<BulkResponse> actionListener) {
|
||||
ActionListener<BulkResponse> wrapActionListenerIfNeeded(long ingestTookInMillis, ActionListener<BulkResponse> actionListener) {
|
||||
if (itemResponses.isEmpty()) {
|
||||
return actionListener;
|
||||
return new ActionListener<BulkResponse>() {
|
||||
@Override
|
||||
public void onResponse(BulkResponse response) {
|
||||
actionListener.onResponse(new BulkResponse(response.getItems(), response.getTookInMillis(), ingestTookInMillis));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
actionListener.onFailure(e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return new IngestBulkResponseListener(originalSlots, itemResponses, actionListener);
|
||||
return new IngestBulkResponseListener(ingestTookInMillis, originalSlots, itemResponses, actionListener);
|
||||
}
|
||||
}
|
||||
|
||||
@ -197,24 +210,26 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
|
||||
|
||||
}
|
||||
|
||||
private final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {
|
||||
final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {
|
||||
|
||||
private final long ingestTookInMillis;
|
||||
private final int[] originalSlots;
|
||||
private final List<BulkItemResponse> itemResponses;
|
||||
private final ActionListener<BulkResponse> actionListener;
|
||||
|
||||
IngestBulkResponseListener(int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
|
||||
IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
|
||||
this.ingestTookInMillis = ingestTookInMillis;
|
||||
this.itemResponses = itemResponses;
|
||||
this.actionListener = actionListener;
|
||||
this.originalSlots = originalSlots;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponse(BulkResponse bulkItemResponses) {
|
||||
for (int i = 0; i < bulkItemResponses.getItems().length; i++) {
|
||||
itemResponses.add(originalSlots[i], bulkItemResponses.getItems()[i]);
|
||||
public void onResponse(BulkResponse response) {
|
||||
for (int i = 0; i < response.getItems().length; i++) {
|
||||
itemResponses.add(originalSlots[i], response.getItems()[i]);
|
||||
}
|
||||
actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), bulkItemResponses.getTookInMillis()));
|
||||
actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), response.getTookInMillis(), ingestTookInMillis));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -20,6 +20,10 @@
|
||||
package org.elasticsearch.action.ingest;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
@ -27,24 +31,32 @@ import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.ingest.core.IngestInfo;
|
||||
import org.elasticsearch.node.service.NodeService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final ClusterService clusterService;
|
||||
private final TransportNodesInfoAction nodesInfoAction;
|
||||
|
||||
@Inject
|
||||
public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
|
||||
TransportNodesInfoAction nodesInfoAction) {
|
||||
super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.nodesInfoAction = nodesInfoAction;
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
}
|
||||
|
||||
@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip
|
||||
|
||||
@Override
|
||||
protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
|
||||
pipelineStore.put(clusterService, request, listener);
|
||||
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
|
||||
nodesInfoRequest.clear();
|
||||
nodesInfoRequest.ingest(true);
|
||||
nodesInfoAction.execute(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
|
||||
@Override
|
||||
public void onResponse(NodesInfoResponse nodeInfos) {
|
||||
try {
|
||||
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
|
||||
for (NodeInfo nodeInfo : nodeInfos) {
|
||||
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
|
||||
}
|
||||
pipelineStore.put(clusterService, ingestInfos, request, listener);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -26,7 +26,8 @@ import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorBuilder;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
|
||||
import org.elasticsearch.search.highlight.HighlightBuilder;
|
||||
import org.elasticsearch.search.sort.SortBuilder;
|
||||
|
||||
@ -152,16 +153,26 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder<Pe
|
||||
}
|
||||
|
||||
/**
|
||||
* Delegates to {@link PercolateSourceBuilder#addAggregation(AbstractAggregationBuilder)}
|
||||
* Delegates to
|
||||
* {@link PercolateSourceBuilder#addAggregation(AggregatorBuilder)}
|
||||
*/
|
||||
public PercolateRequestBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) {
|
||||
public PercolateRequestBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
|
||||
sourceBuilder().addAggregation(aggregationBuilder);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the percolate request definition directly on the request.
|
||||
* This will overwrite any definitions set by any of the delegate methods.
|
||||
* Delegates to
|
||||
* {@link PercolateSourceBuilder#addAggregation(PipelineAggregatorBuilder)}
|
||||
*/
|
||||
public PercolateRequestBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) {
|
||||
sourceBuilder().addAggregation(aggregationBuilder);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the percolate request definition directly on the request. This will
|
||||
* overwrite any definitions set by any of the delegate methods.
|
||||
*/
|
||||
public PercolateRequestBuilder setSource(PercolateSourceBuilder source) {
|
||||
sourceBuilder = source;
|
||||
|
@ -29,7 +29,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorBuilder;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.highlight.HighlightBuilder;
|
||||
import org.elasticsearch.search.sort.ScoreSortBuilder;
|
||||
import org.elasticsearch.search.sort.SortBuilder;
|
||||
@ -51,7 +53,8 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
|
||||
private List<SortBuilder> sorts;
|
||||
private Boolean trackScores;
|
||||
private HighlightBuilder highlightBuilder;
|
||||
private List<AbstractAggregationBuilder> aggregations;
|
||||
private List<AggregatorBuilder<?>> aggregationBuilders;
|
||||
private List<PipelineAggregatorBuilder> pipelineAggregationBuilders;
|
||||
|
||||
/**
|
||||
* Sets the document to run the percolate queries against.
|
||||
@ -123,11 +126,22 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
|
||||
/**
|
||||
* Add an aggregation definition.
|
||||
*/
|
||||
public PercolateSourceBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) {
|
||||
if (aggregations == null) {
|
||||
aggregations = new ArrayList<>();
|
||||
public PercolateSourceBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
|
||||
if (aggregationBuilders == null) {
|
||||
aggregationBuilders = new ArrayList<>();
|
||||
}
|
||||
aggregations.add(aggregationBuilder);
|
||||
aggregationBuilders.add(aggregationBuilder);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an aggregation definition.
|
||||
*/
|
||||
public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) {
|
||||
if (pipelineAggregationBuilders == null) {
|
||||
pipelineAggregationBuilders = new ArrayList<>();
|
||||
}
|
||||
pipelineAggregationBuilders.add(aggregationBuilder);
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -157,13 +171,20 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
|
||||
builder.field("track_scores", trackScores);
|
||||
}
|
||||
if (highlightBuilder != null) {
|
||||
highlightBuilder.toXContent(builder, params);
|
||||
builder.field(SearchSourceBuilder.HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder);
|
||||
}
|
||||
if (aggregations != null) {
|
||||
if (aggregationBuilders != null || pipelineAggregationBuilders != null) {
|
||||
builder.field("aggregations");
|
||||
builder.startObject();
|
||||
for (AbstractAggregationBuilder aggregation : aggregations) {
|
||||
aggregation.toXContent(builder, params);
|
||||
if (aggregationBuilders != null) {
|
||||
for (AggregatorBuilder<?> aggregation : aggregationBuilders) {
|
||||
aggregation.toXContent(builder, params);
|
||||
}
|
||||
}
|
||||
if (pipelineAggregationBuilders != null) {
|
||||
for (PipelineAggregatorBuilder aggregation : pipelineAggregationBuilders) {
|
||||
aggregation.toXContent(builder, params);
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -17,12 +17,12 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
/**
|
||||
* Base implementation for an async action.
|
||||
*/
|
||||
public class AbstractAsyncAction {
|
||||
abstract class AbstractAsyncAction {
|
||||
|
||||
private final long startTime;
|
||||
|
||||
@ -46,4 +46,5 @@ public class AbstractAsyncAction {
|
||||
return Math.max(1, System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
abstract void start();
|
||||
}
|
@ -0,0 +1,393 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||
import org.elasticsearch.action.support.TransportActions;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest;
|
||||
|
||||
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
|
||||
|
||||
protected final ESLogger logger;
|
||||
protected final SearchTransportService searchTransportService;
|
||||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
protected final SearchPhaseController searchPhaseController;
|
||||
protected final ThreadPool threadPool;
|
||||
protected final ActionListener<SearchResponse> listener;
|
||||
protected final GroupShardsIterator shardsIts;
|
||||
protected final SearchRequest request;
|
||||
protected final ClusterState clusterState;
|
||||
protected final DiscoveryNodes nodes;
|
||||
protected final int expectedSuccessfulOps;
|
||||
private final int expectedTotalOps;
|
||||
protected final AtomicInteger successfulOps = new AtomicInteger();
|
||||
private final AtomicInteger totalOps = new AtomicInteger();
|
||||
protected final AtomicArray<FirstResult> firstResults;
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
private final Object shardFailuresMutex = new Object();
|
||||
protected volatile ScoreDoc[] sortedShardList;
|
||||
|
||||
protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
|
||||
ActionListener<SearchResponse> listener) {
|
||||
this.logger = logger;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.indexNameExpressionResolver = indexNameExpressionResolver;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.threadPool = threadPool;
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
|
||||
this.clusterState = clusterService.state();
|
||||
nodes = clusterState.nodes();
|
||||
|
||||
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
|
||||
|
||||
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
|
||||
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
|
||||
// of just for the _search api
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
|
||||
startTime(), request.indices());
|
||||
|
||||
for (String index : concreteIndices) {
|
||||
clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
|
||||
}
|
||||
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(),
|
||||
request.indices());
|
||||
|
||||
shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
|
||||
expectedSuccessfulOps = shardsIts.size();
|
||||
// we need to add 1 for non active partition, since we count it in the total!
|
||||
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
|
||||
|
||||
firstResults = new AtomicArray<>(shardsIts.size());
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (expectedSuccessfulOps == 0) {
|
||||
//no search shards to search on, bail with empty response
|
||||
//(it happens with search across _all with no indices around and consistent with broadcast operations)
|
||||
listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
|
||||
ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
int shardIndex = -1;
|
||||
for (final ShardIterator shardIt : shardsIts) {
|
||||
shardIndex++;
|
||||
final ShardRouting shard = shardIt.nextOrNull();
|
||||
if (shard != null) {
|
||||
performFirstPhase(shardIndex, shardIt, shard);
|
||||
} else {
|
||||
// really, no shards active in this group
|
||||
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
|
||||
if (shard == null) {
|
||||
// no more active shards... (we should not really get here, but just for safety)
|
||||
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
} else {
|
||||
final DiscoveryNode node = nodes.get(shard.currentNodeId());
|
||||
if (node == null) {
|
||||
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
} else {
|
||||
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState,
|
||||
shard.index().getName(), request.indices());
|
||||
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases,
|
||||
startTime()), new ActionListener<FirstResult>() {
|
||||
@Override
|
||||
public void onResponse(FirstResult result) {
|
||||
onFirstPhaseResult(shardIndex, shard, result, shardIt);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
|
||||
result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
|
||||
processFirstPhaseResult(shardIndex, result);
|
||||
// we need to increment successful ops first before we compare the exit condition otherwise if we
|
||||
// are fast we could concurrently update totalOps but then preempt one of the threads which can
|
||||
// cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.
|
||||
successfulOps.incrementAndGet();
|
||||
// increment all the "future" shards to update the total ops since we some may work and some may not...
|
||||
// and when that happens, we break on total ops, so we must maintain them
|
||||
final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
|
||||
if (xTotalOps == expectedTotalOps) {
|
||||
try {
|
||||
innerMoveToSecondPhase();
|
||||
} catch (Throwable e) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
|
||||
}
|
||||
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
|
||||
}
|
||||
} else if (xTotalOps > expectedTotalOps) {
|
||||
raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared " +
|
||||
"to expected [" + expectedTotalOps + "]"));
|
||||
}
|
||||
}
|
||||
|
||||
void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
|
||||
final ShardIterator shardIt, Throwable t) {
|
||||
// we always add the shard failure for a specific shard instance
|
||||
// we do make sure to clean it on a successful response from a shard
|
||||
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
|
||||
addShardFailure(shardIndex, shardTarget, t);
|
||||
|
||||
if (totalOps.incrementAndGet() == expectedTotalOps) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
|
||||
if (shard != null) {
|
||||
logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
|
||||
} else {
|
||||
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
|
||||
}
|
||||
} else if (logger.isTraceEnabled()) {
|
||||
logger.trace("{}: Failed to execute [{}]", t, shard, request);
|
||||
}
|
||||
}
|
||||
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
|
||||
if (successfulOps.get() == 0) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
|
||||
}
|
||||
|
||||
// no successful ops, raise an exception
|
||||
raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
|
||||
} else {
|
||||
try {
|
||||
innerMoveToSecondPhase();
|
||||
} catch (Throwable e) {
|
||||
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
final ShardRouting nextShard = shardIt.nextOrNull();
|
||||
final boolean lastShard = nextShard == null;
|
||||
// trace log this exception
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
|
||||
}
|
||||
if (!lastShard) {
|
||||
try {
|
||||
performFirstPhase(shardIndex, shardIt, nextShard);
|
||||
} catch (Throwable t1) {
|
||||
onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
|
||||
}
|
||||
} else {
|
||||
// no more shards active, add a failure
|
||||
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
|
||||
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
|
||||
logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request,
|
||||
boolean lastShard) {
|
||||
if (shard != null) {
|
||||
return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
|
||||
} else {
|
||||
return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
|
||||
}
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
}
|
||||
|
||||
protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
|
||||
// we don't aggregate shard failures on non active shards (but do keep the header counts right)
|
||||
if (TransportActions.isShardNotAvailableException(t)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
|
||||
if (shardFailures == null) {
|
||||
synchronized (shardFailuresMutex) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(shardsIts.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
ShardSearchFailure failure = shardFailures.get(shardIndex);
|
||||
if (failure == null) {
|
||||
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
|
||||
} else {
|
||||
// the failure is already present, try and not override it with an exception that is less meaningless
|
||||
// for example, getting illegal shard state
|
||||
if (TransportActions.isReadOverrideException(t)) {
|
||||
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void raiseEarlyFailure(Throwable t) {
|
||||
for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
|
||||
try {
|
||||
DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
|
||||
sendReleaseSearchContext(entry.value.id(), node);
|
||||
} catch (Throwable t1) {
|
||||
logger.trace("failed to release context", t1);
|
||||
}
|
||||
}
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
||||
/**
|
||||
* Releases shard targets that are not used in the docsIdsToLoad.
|
||||
*/
|
||||
protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
|
||||
AtomicArray<IntArrayList> docIdsToLoad) {
|
||||
if (docIdsToLoad == null) {
|
||||
return;
|
||||
}
|
||||
// we only release search context that we did not fetch from if we are not scrolling
|
||||
if (request.scroll() == null) {
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
|
||||
final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
|
||||
if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
|
||||
&& docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
|
||||
try {
|
||||
DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
|
||||
sendReleaseSearchContext(entry.value.queryResult().id(), node);
|
||||
} catch (Throwable t1) {
|
||||
logger.trace("failed to release context", t1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
|
||||
if (node != null) {
|
||||
searchTransportService.sendFreeContext(node, contextId, request);
|
||||
}
|
||||
}
|
||||
|
||||
protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry,
|
||||
ScoreDoc[] lastEmittedDocPerShard) {
|
||||
if (lastEmittedDocPerShard != null) {
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
|
||||
return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
|
||||
} else {
|
||||
return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
|
||||
ActionListener<FirstResult> listener);
|
||||
|
||||
protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
|
||||
firstResults.set(shardIndex, result);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
|
||||
}
|
||||
|
||||
// clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
|
||||
// so its ok concurrency wise to miss potentially the shard failures being created because of another failure
|
||||
// in the #addShardFailure, because by definition, it will happen on *another* shardIndex
|
||||
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
|
||||
if (shardFailures != null) {
|
||||
shardFailures.set(shardIndex, null);
|
||||
}
|
||||
}
|
||||
|
||||
final void innerMoveToSecondPhase() throws Exception {
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
boolean hadOne = false;
|
||||
for (int i = 0; i < firstResults.length(); i++) {
|
||||
FirstResult result = firstResults.get(i);
|
||||
if (result == null) {
|
||||
continue; // failure
|
||||
}
|
||||
if (hadOne) {
|
||||
sb.append(",");
|
||||
} else {
|
||||
hadOne = true;
|
||||
}
|
||||
sb.append(result.shardTarget());
|
||||
}
|
||||
|
||||
logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
|
||||
}
|
||||
moveToSecondPhase();
|
||||
}
|
||||
|
||||
protected abstract void moveToSecondPhase() throws Exception;
|
||||
|
||||
protected abstract String firstPhaseName();
|
||||
}
|
@ -17,14 +17,14 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class ParsedScrollId {
|
||||
class ParsedScrollId {
|
||||
|
||||
public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch";
|
||||
|
@ -17,9 +17,9 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
public class ScrollIdForNode {
|
||||
class ScrollIdForNode {
|
||||
private final String node;
|
||||
private final long scrollId;
|
||||
|
@ -0,0 +1,143 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
|
||||
|
||||
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
|
||||
|
||||
SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
|
||||
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
SearchPhaseController searchPhaseController, ThreadPool threadPool,
|
||||
SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
|
||||
request, listener);
|
||||
queryFetchResults = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "dfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
|
||||
ActionListener<DfsSearchResult> listener) {
|
||||
searchTransportService.sendExecuteDfs(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() {
|
||||
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
|
||||
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
|
||||
|
||||
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
|
||||
DfsSearchResult dfsResult = entry.value;
|
||||
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
|
||||
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
|
||||
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
|
||||
}
|
||||
}
|
||||
|
||||
void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
|
||||
final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
|
||||
searchTransportService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(QueryFetchSearchResult result) {
|
||||
result.shardTarget(dfsResult.shardTarget());
|
||||
queryFetchResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
|
||||
} finally {
|
||||
// the query might not have been executed at all (for example because thread pool rejected execution)
|
||||
// and the search context that was created in dfs phase might not be released.
|
||||
// release it again to be in the safe side
|
||||
sendReleaseSearchContext(querySearchRequest.id(), node);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
|
||||
AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
|
||||
queryFetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(t);
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
}
|
@ -0,0 +1,224 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
|
||||
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
final AtomicArray<IntArrayList> docIdsToLoad;
|
||||
|
||||
SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
|
||||
ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
SearchPhaseController searchPhaseController, ThreadPool threadPool,
|
||||
SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
|
||||
request, listener);
|
||||
queryResults = new AtomicArray<>(firstResults.length());
|
||||
fetchResults = new AtomicArray<>(firstResults.length());
|
||||
docIdsToLoad = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "dfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
|
||||
ActionListener<DfsSearchResult> listener) {
|
||||
searchTransportService.sendExecuteDfs(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
    protected void moveToSecondPhase() {
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
        final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
        for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
            executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
        }
    }

    void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                      final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
        searchTransportService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
            @Override
            public void onResponse(QuerySearchResult result) {
                result.shardTarget(dfsResult.shardTarget());
                queryResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    executeFetchPhase();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                } finally {
                    // the query might not have been executed at all (for example because thread pool rejected
                    // execution) and the search context that was created in dfs phase might not be released.
                    // release it again to be in the safe side
                    sendReleaseSearchContext(querySearchRequest.id(), node);
                }
            }
        });
    }

    void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
        }
        this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
            } else {
                executeFetchPhase();
            }
        }
    }

    void executeFetchPhase() {
        try {
            innerExecuteFetchPhase();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
        }
    }

    void innerExecuteFetchPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResult queryResult = queryResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
                        SearchShardTarget shardTarget, AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
                    fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
                }
            }
        });
    }
}
@ -0,0 +1,85 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;

class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

    SearchQueryAndFetchAsyncAction(ESLogger logger, SearchTransportService searchTransportService,
                                   ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                   SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                   SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
            request, listener);
    }

    @Override
    protected String firstPhaseName() {
        return "query_fetch";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QueryFetchSearchResult> listener) {
        searchTransportService.sendExecuteFetch(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                boolean useScroll = request.scroll() != null;
                sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                    firstResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
            }

            @Override
            public void onFailure(Throwable t) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(failure);
            }
        });
    }
}
@ -0,0 +1,157 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {

    final AtomicArray<FetchSearchResult> fetchResults;
    final AtomicArray<IntArrayList> docIdsToLoad;

    SearchQueryThenFetchAsyncAction(ESLogger logger, SearchTransportService searchService,
                                    ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                    SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                    SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
        fetchResults = new AtomicArray<>(firstResults.length());
        docIdsToLoad = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "query";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QuerySearchResultProvider> listener) {
        searchTransportService.sendExecuteQuery(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResultProvider queryResult = firstResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchTransportService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                    fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                    successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                }
            }
        });
    }
}
@ -28,7 +28,8 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
@ -371,9 +372,17 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
    }

    /**
     * Adds an get to the search operation.
     * Adds an aggregation to the search operation.
     */
    public SearchRequestBuilder addAggregation(AbstractAggregationBuilder aggregation) {
    public SearchRequestBuilder addAggregation(AggregatorBuilder<?> aggregation) {
        sourceBuilder().aggregation(aggregation);
        return this;
    }

    /**
     * Adds an aggregation to the search operation.
     */
    public SearchRequestBuilder addAggregation(PipelineAggregatorBuilder aggregation) {
        sourceBuilder().aggregation(aggregation);
        return this;
    }

@ -0,0 +1,181 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;

class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

    private final ESLogger logger;
    private final SearchPhaseController searchPhaseController;
    private final SearchTransportService searchTransportService;
    private final SearchScrollRequest request;
    private final ActionListener<SearchResponse> listener;
    private final ParsedScrollId scrollId;
    private final DiscoveryNodes nodes;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
    private final AtomicInteger successfulOps;
    private final AtomicInteger counter;

    SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                         SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                         SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchPhaseController = searchPhaseController;
        this.searchTransportService = searchTransportService;
        this.request = request;
        this.listener = listener;
        this.scrollId = scrollId;
        this.nodes = clusterService.state().nodes();
        this.successfulOps = new AtomicInteger(scrollId.getContext().length);
        this.counter = new AtomicInteger(scrollId.getContext().length);

        this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    // we do our best to return the shard failures, but its ok if its not fully concurrently safe
    // we simply try and return as much as possible
    protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
        if (shardFailures == null) {
            shardFailures = new AtomicArray<>(scrollId.getContext().length);
        }
        shardFailures.set(shardIndex, failure);
    }

    public void start() {
        if (scrollId.getContext().length == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }

        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];
            DiscoveryNode node = nodes.get(target.getNode());
            if (node != null) {
                executePhase(i, node, target.getScrollId());
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }
        }

        for (ScrollIdForNode target : scrollId.getContext()) {
            DiscoveryNode node = nodes.get(target.getNode());
            if (node == null) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }
        }
    }

    void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchTransportService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
            @Override
            public void onResponse(ScrollQueryFetchSearchResult result) {
                queryFetchResults.set(shardIndex, result.result());
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                onPhaseFailure(t, searchId, shardIndex);
            }
        });
    }

    private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, searchId);
        }
        addShardFailure(shardIndex, new ShardSearchFailure(t));
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
            } else {
                finishHim();
            }
        }
    }

    private void finishHim() {
        try {
            innerFinishHim();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() throws Exception {
        ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
        final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
            queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
@ -0,0 +1,226 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;

class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

    private final ESLogger logger;
    private final SearchTransportService searchTransportService;
    private final SearchPhaseController searchPhaseController;
    private final SearchScrollRequest request;
    private final ActionListener<SearchResponse> listener;
    private final ParsedScrollId scrollId;
    private final DiscoveryNodes nodes;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    final AtomicArray<QuerySearchResult> queryResults;
    final AtomicArray<FetchSearchResult> fetchResults;
    private volatile ScoreDoc[] sortedShardList;
    private final AtomicInteger successfulOps;

    SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                          SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
                                          SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchTransportService = searchTransportService;
        this.searchPhaseController = searchPhaseController;
        this.request = request;
        this.listener = listener;
        this.scrollId = scrollId;
        this.nodes = clusterService.state().nodes();
        this.successfulOps = new AtomicInteger(scrollId.getContext().length);
        this.queryResults = new AtomicArray<>(scrollId.getContext().length);
        this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    // we do our best to return the shard failures, but its ok if its not fully concurrently safe
    // we simply try and return as much as possible
    protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
        if (shardFailures == null) {
            shardFailures = new AtomicArray<>(scrollId.getContext().length);
        }
        shardFailures.set(shardIndex, failure);
    }

    public void start() {
        if (scrollId.getContext().length == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);

        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];
            DiscoveryNode node = nodes.get(target.getNode());
            if (node != null) {
                executeQueryPhase(i, counter, node, target.getScrollId());
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                        return;
                    }
                }
            }
        }
    }

    private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchTransportService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
            @Override
            public void onResponse(ScrollQuerySearchResult result) {
                queryResults.set(shardIndex, result.queryResult());
                if (counter.decrementAndGet() == 0) {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        onFailure(e);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                onQueryPhaseFailure(shardIndex, counter, searchId, t);
            }
        });
    }

    void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, searchId);
        }
        addShardFailure(shardIndex, new ShardSearchFailure(t));
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
            } else {
                try {
                    executeFetchPhase();
                } catch (Throwable e) {
                    listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                }
            }
        }
    }

    private void executeFetchPhase() throws Exception {
        sortedShardList = searchPhaseController.sortDocs(true, queryResults);
        AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            IntArrayList docIds = entry.value;
            final QuerySearchResult querySearchResult = queryResults.get(entry.index);
            ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
            ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
            DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
            searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    result.shardTarget(querySearchResult.shardTarget());
                    fetchResults.set(entry.index, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Failed to execute fetch phase", t);
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            });
        }
    }

    private void finishHim() {
        try {
            innerFinishHim();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() {
        InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget;

import java.io.IOException;

import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;

/**
 * Represents a failure to search on a specific shard.
 */
@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
    @Override
    public int shardId() {
        if (shardTarget != null) {
            return shardTarget.shardId();
            return shardTarget.shardId().id();
        }
        return -1;
    }
@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            shardTarget = readSearchShardTarget(in);
            shardTarget = new SearchShardTarget(in);
        }
        reason = in.readString();
        status = RestStatus.readFrom(in);

@ -20,7 +20,6 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.type.ScrollIdForNode;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
@ -31,7 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -41,22 +40,22 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

/**
 */
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {

    private final ClusterService clusterService;
    private final SearchServiceTransportAction searchServiceTransportAction;
    private final SearchTransportService searchTransportService;

    @Inject
    public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
                                      ClusterService clusterService, SearchServiceTransportAction searchServiceTransportAction,
                                      ClusterService clusterService, SearchTransportService searchTransportService,
                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
        this.clusterService = clusterService;
        this.searchServiceTransportAction = searchServiceTransportAction;
        this.searchTransportService = searchTransportService;
    }

    @Override
@ -65,10 +64,8 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
    }

    private class Async {

        final DiscoveryNodes nodes;
        final CountDown expectedOps;
        final ClearScrollRequest request;
        final List<ScrollIdForNode[]> contexts = new ArrayList<>();
        final ActionListener<ClearScrollResponse> listener;
        final AtomicReference<Throwable> expHolder;
@ -86,8 +83,6 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
                    this.contexts.add(context);
                }
            }

            this.request = request;
            this.listener = listener;
            this.expHolder = new AtomicReference<>();
            this.expectedOps = new CountDown(expectedOps);
@ -101,7 +96,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro

            if (contexts.isEmpty()) {
                for (final DiscoveryNode node : nodes) {
                    searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<TransportResponse>() {
                    searchTransportService.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
                        @Override
                        public void onResponse(TransportResponse response) {
                            onFreedContext(true);
@ -122,9 +117,9 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
                    continue;
                }

                searchServiceTransportAction.sendFreeContext(node, target.getScrollId(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
                searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchTransportService.SearchFreeContextResponse>() {
                    @Override
                    public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
                    public void onResponse(SearchTransportService.SearchFreeContextResponse freed) {
                        onFreedContext(freed.isFreed());
                    }

@ -20,10 +20,6 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
@ -33,13 +29,14 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Map;
import java.util.Set;

import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;

/**
@ -48,58 +45,60 @@ import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {

    private final ClusterService clusterService;
    private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction;
    private final TransportSearchQueryThenFetchAction queryThenFetchAction;
    private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
    private final TransportSearchQueryAndFetchAction queryAndFetchAction;
    private final boolean optimizeSingleShard;
    private final SearchTransportService searchTransportService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchAction(Settings settings, ThreadPool threadPool,
                                 TransportService transportService, ClusterService clusterService,
                                 TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction,
                                 TransportSearchQueryThenFetchAction queryThenFetchAction,
                                 TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction,
                                 TransportSearchQueryAndFetchAction queryAndFetchAction,
                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
    public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
                                 TransportService transportService, SearchTransportService searchTransportService,
                                 ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
                                 indexNameExpressionResolver) {
        super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
        this.searchPhaseController = searchPhaseController;
        this.searchTransportService = searchTransportService;
        this.clusterService = clusterService;
        this.dfsQueryThenFetchAction = dfsQueryThenFetchAction;
        this.queryThenFetchAction = queryThenFetchAction;
        this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
        this.queryAndFetchAction = queryAndFetchAction;
        this.optimizeSingleShard = this.settings.getAsBoolean("action.search.optimize_single_shard", true);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        // optimize search type for cases where there is only one shard group to search on
        if (optimizeSingleShard) {
            try {
                ClusterState clusterState = clusterService.state();
                String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
                Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
                int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
                if (shardCount == 1) {
                    // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
                    searchRequest.searchType(QUERY_AND_FETCH);
                }
            } catch (IndexNotFoundException | IndexClosedException e) {
                // ignore these failures, we will notify the search response if its really the case from the actual action
            } catch (Exception e) {
                logger.debug("failed to optimize search type, continue as normal", e);
        try {
            ClusterState clusterState = clusterService.state();
            String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
                searchRequest.routing(), searchRequest.indices());
            int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
            if (shardCount == 1) {
                // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
                searchRequest.searchType(QUERY_AND_FETCH);
            }
        } catch (IndexNotFoundException | IndexClosedException e) {
            // ignore these failures, we will notify the search response if its really the case from the actual action
        } catch (Exception e) {
            logger.debug("failed to optimize search type, continue as normal", e);
        }
        if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
            dfsQueryThenFetchAction.execute(searchRequest, listener);
        } else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
            queryThenFetchAction.execute(searchRequest, listener);
        } else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
            dfsQueryAndFetchAction.execute(searchRequest, listener);
        } else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) {
            queryAndFetchAction.execute(searchRequest, listener);
        } else {
            throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");

        AbstractSearchAsyncAction searchAsyncAction;
        switch(searchRequest.searchType()) {
            case DFS_QUERY_THEN_FETCH:
                searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                break;
            case QUERY_THEN_FETCH:
                searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService,
                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                break;
            case DFS_QUERY_AND_FETCH:
                searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                break;
            case QUERY_AND_FETCH:
                searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService,
                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
                break;
            default:
                throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
        }
        searchAsyncAction.start();
    }
}

@ -17,13 +17,10 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
@ -42,17 +39,19 @@ import static java.util.Collections.emptyMap;
/**
 *
 */
public abstract class TransportSearchHelper {
final class TransportSearchHelper {

    public static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, String[] filteringAliases, long nowInMillis) {
    static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request,
                                                              String[] filteringAliases, long nowInMillis) {
        return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis);
    }

    public static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
    static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
        return new InternalScrollSearchRequest(request, id);
    }

    public static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
        if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
        } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
@ -62,7 +61,8 @@ public abstract class TransportSearchHelper {
        }
    }

    public static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
        StringBuilder sb = new StringBuilder().append(type).append(';');
        sb.append(searchPhaseResults.asList().size()).append(';');
        for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
@ -81,7 +81,7 @@ public abstract class TransportSearchHelper {
        return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
    }

    public static ParsedScrollId parseScrollId(String scrollId) {
    static ParsedScrollId parseScrollId(String scrollId) {
        CharsRefBuilder spare = new CharsRefBuilder();
        try {
            byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
@ -128,5 +128,4 @@ public abstract class TransportSearchHelper {
    private TransportSearchHelper() {

    }

}
@ -20,51 +20,60 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.type.ParsedScrollId;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_AND_FETCH_TYPE;
import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
import static org.elasticsearch.action.search.ParsedScrollId.QUERY_AND_FETCH_TYPE;
import static org.elasticsearch.action.search.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

/**
 *
 */
public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> {

    private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction;
    private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction;
    private final ClusterService clusterService;
    private final SearchTransportService searchTransportService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                                       TransportSearchScrollQueryThenFetchAction queryThenFetchAction,
                                       TransportSearchScrollQueryAndFetchAction queryAndFetchAction,
                                       ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchScrollRequest::new);
        this.queryThenFetchAction = queryThenFetchAction;
        this.queryAndFetchAction = queryAndFetchAction;
                                       ClusterService clusterService, SearchTransportService searchTransportService,
                                       SearchPhaseController searchPhaseController,
                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
            SearchScrollRequest::new);
        this.clusterService = clusterService;
        this.searchTransportService = searchTransportService;
        this.searchPhaseController = searchPhaseController;
    }

    @Override
    protected void doExecute(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
        try {
            ParsedScrollId scrollId = parseScrollId(request.scrollId());
            if (scrollId.getType().equals(QUERY_THEN_FETCH_TYPE)) {
                queryThenFetchAction.execute(request, scrollId, listener);
            } else if (scrollId.getType().equals(QUERY_AND_FETCH_TYPE)) {
                queryAndFetchAction.execute(request, scrollId, listener);
            } else {
                throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
            AbstractAsyncAction action;
            switch (scrollId.getType()) {
                case QUERY_THEN_FETCH_TYPE:
                    action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService,
                        searchPhaseController, request, scrollId, listener);
                    break;
                case QUERY_AND_FETCH_TYPE:
                    action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchTransportService,
                        searchPhaseController, request, scrollId, listener);
                    break;
                default:
                    throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
            }
            action.start();
        } catch (Throwable e) {
            listener.onFailure(e);
        }

@ -1,158 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 *
 */
public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                                 SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {

        private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
            queryFetchResults = new AtomicArray<>(firstResults.length());
        }

        @Override
        protected String firstPhaseName() {
            return "dfs";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
            searchService.sendExecuteDfs(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() {
            final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
            final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());

            for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
                DfsSearchResult dfsResult = entry.value;
                DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
                QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
                executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
            }
        }

        void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
            searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
                @Override
                public void onResponse(QueryFetchSearchResult result) {
                    result.shardTarget(dfsResult.shardTarget());
                    queryFetchResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                    } finally {
                        // the query might not have been executed at all (for example because thread pool rejected execution)
                        // and the search context that was created in dfs phase might not be released.
                        // release it again to be in the safe side
                        sendReleaseSearchContext(querySearchRequest.id(), node);
                    }
                }
            });
        }

        void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
            }
            this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                finishHim();
            }
        }

        private void finishHim() {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                        queryFetchResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                }

                @Override
                public void onFailure(Throwable t) {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(t);
                }
            });

        }
    }
}
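The AsyncAction in the deleted file above, and the query-then-fetch variants that follow, all coordinate their second phase the same way: one result slot per shard plus a shared countdown, with whichever response arrives last triggering the merge. A small stand-alone sketch of that fan-out/fan-in pattern follows; it uses plain JDK types (AtomicReferenceArray in place of Elasticsearch's AtomicArray) and hypothetical names, so it is an illustration of the idiom rather than the deleted code itself.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Minimal fan-out/fan-in sketch: one slot per shard, a shared countdown,
// and a single "finish" step that runs when the last response arrives.
final class ShardFanInSketch {

    public static void main(String[] args) throws InterruptedException {
        int shards = 4;
        AtomicReferenceArray<String> results = new AtomicReferenceArray<>(shards);
        AtomicInteger counter = new AtomicInteger(shards);
        ExecutorService pool = Executors.newFixedThreadPool(shards);

        for (int i = 0; i < shards; i++) {
            final int shardIndex = i;
            pool.execute(() -> {
                // store this shard's result in its dedicated slot (no locking needed)
                results.set(shardIndex, "result-from-shard-" + shardIndex);
                // the thread that decrements the counter to zero performs the merge;
                // success and failure paths both decrement so the merge always runs
                if (counter.decrementAndGet() == 0) {
                    finish(results);
                }
            });
        }

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }

    private static void finish(AtomicReferenceArray<String> results) {
        StringBuilder merged = new StringBuilder("merged:");
        for (int i = 0; i < results.length(); i++) {
            merged.append(' ').append(results.get(i));
        }
        System.out.println(merged);
    }
}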
@ -1,239 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchDfsQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
|
||||
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
final AtomicArray<IntArrayList> docIdsToLoad;
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
queryResults = new AtomicArray<>(firstResults.length());
|
||||
fetchResults = new AtomicArray<>(firstResults.length());
|
||||
docIdsToLoad = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "dfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
|
||||
searchService.sendExecuteDfs(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() {
|
||||
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
|
||||
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
|
||||
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
|
||||
DfsSearchResult dfsResult = entry.value;
|
||||
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
|
||||
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
|
||||
executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
|
||||
}
|
||||
}
|
||||
|
||||
void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
|
||||
searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
|
||||
@Override
|
||||
public void onResponse(QuerySearchResult result) {
|
||||
result.shardTarget(dfsResult.shardTarget());
|
||||
queryResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
executeFetchPhase();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
|
||||
} finally {
|
||||
// the query might not have been executed at all (for example because thread pool rejected execution)
|
||||
// and the search context that was created in dfs phase might not be released.
|
||||
// release it again to be in the safe side
|
||||
sendReleaseSearchContext(querySearchRequest.id(), node);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
|
||||
} else {
|
||||
executeFetchPhase();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void executeFetchPhase() {
|
||||
try {
|
||||
innerExecuteFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
void innerExecuteFetchPhase() throws Exception {
|
||||
boolean useScroll = request.scroll() != null;
|
||||
sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
|
||||
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
|
||||
|
||||
if (docIdsToLoad.asList().isEmpty()) {
|
||||
finishHim();
|
||||
return;
|
||||
}
|
||||
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
|
||||
request, sortedShardList, firstResults.length()
|
||||
);
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
|
||||
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
|
||||
QuerySearchResult queryResult = queryResults.get(entry.index);
|
||||
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
|
||||
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
|
||||
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
|
||||
}
|
||||
}
|
||||
|
||||
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
|
||||
searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(shardTarget);
|
||||
fetchResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
// the search context might not be cleared on the node where the fetch was executed for example
|
||||
// because the action was rejected by the thread pool. in this case we need to send a dedicated
|
||||
// request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
|
||||
// in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
|
||||
docIdsToLoad.set(shardIndex, null);
|
||||
onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, shardTarget, t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
|
||||
fetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(failure);
|
||||
} finally {
|
||||
releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
}
|
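One detail worth calling out from the onFailure handlers in the deleted file above: because the search pool can reject the second-phase task, the per-shard context created during the dfs phase is released in a finally block even when the failure is only being recorded. The sketch below mirrors that release-on-failure idiom with a hypothetical SearchContext type and plain JDK classes; it is not the Elasticsearch API.

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical, JDK-only stand-ins; SearchContext here is not the Elasticsearch class.
final class ContextReleaseSketch {

    // A server-side resource that must be freed exactly once.
    static final class SearchContext {
        private final AtomicBoolean released = new AtomicBoolean(false);

        void release() {
            if (released.compareAndSet(false, true)) {
                System.out.println("context released");
            }
        }
    }

    // Record the failure, then release the context unconditionally: the second-phase
    // task may never have run (for example a rejected execution), so the context the
    // first phase created is freed here to avoid leaking it.
    static void onQueryFailure(Throwable t, SearchContext context, Runnable recordFailure) {
        try {
            System.out.println("query phase failed: " + t);
            recordFailure.run();
        } finally {
            context.release();
        }
    }

    public static void main(String[] args) {
        SearchContext context = new SearchContext();
        onQueryFailure(new RejectedExecutionException("search pool full"), context,
                () -> System.out.println("shard failure recorded"));
    }
}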
@ -1,104 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchQueryAndFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<QueryFetchSearchResult> {
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "query_fetch";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QueryFetchSearchResult> listener) {
|
||||
searchService.sendExecuteFetch(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() throws Exception {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
boolean useScroll = request.scroll() != null;
|
||||
sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
|
||||
firstResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(failure);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
@ -1,173 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {
|
||||
|
||||
@Inject
|
||||
public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(searchRequest, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends BaseAsyncAction<QuerySearchResultProvider> {
|
||||
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
final AtomicArray<IntArrayList> docIdsToLoad;
|
||||
|
||||
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
super(request, listener);
|
||||
fetchResults = new AtomicArray<>(firstResults.length());
|
||||
docIdsToLoad = new AtomicArray<>(firstResults.length());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String firstPhaseName() {
|
||||
return "query";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResultProvider> listener) {
|
||||
searchService.sendExecuteQuery(node, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void moveToSecondPhase() throws Exception {
|
||||
boolean useScroll = request.scroll() != null;
|
||||
sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
|
||||
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
|
||||
|
||||
if (docIdsToLoad.asList().isEmpty()) {
|
||||
finishHim();
|
||||
return;
|
||||
}
|
||||
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
|
||||
request, sortedShardList, firstResults.length()
|
||||
);
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
|
||||
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
|
||||
QuerySearchResultProvider queryResult = firstResults.get(entry.index);
|
||||
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
|
||||
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
|
||||
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
|
||||
}
|
||||
}
|
||||
|
||||
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
|
||||
searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(shardTarget);
|
||||
fetchResults.set(shardIndex, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
// the search context might not be cleared on the node where the fetch was executed for example
|
||||
// because the action was rejected by the thread pool. in this case we need to send a dedicated
|
||||
// request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
|
||||
// in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
|
||||
docIdsToLoad.set(shardIndex, null);
|
||||
onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
|
||||
}
|
||||
this.addShardFailure(shardIndex, shardTarget, t);
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
|
||||
@Override
|
||||
public void doRun() throws IOException {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
|
||||
fetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
|
||||
releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
try {
|
||||
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("failed to reduce search", failure);
|
||||
}
|
||||
super.onFailure(failure);
|
||||
} finally {
|
||||
releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
@ -1,205 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.SearchScrollRequest;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent {
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final SearchServiceTransportAction searchService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchScrollQueryAndFetchAction(Settings settings, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
|
||||
super(settings);
|
||||
this.clusterService = clusterService;
|
||||
this.searchService = searchService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
}
|
||||
|
||||
public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(request, scrollId, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final SearchScrollRequest request;
|
||||
private final ActionListener<SearchResponse> listener;
|
||||
private final ParsedScrollId scrollId;
|
||||
private final DiscoveryNodes nodes;
|
||||
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
|
||||
|
||||
private final AtomicInteger successfulOps;
|
||||
private final AtomicInteger counter;
|
||||
|
||||
private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
this.scrollId = scrollId;
|
||||
this.nodes = clusterService.state().nodes();
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
this.counter = new AtomicInteger(scrollId.getContext().length);
|
||||
|
||||
this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
shardFailures.set(shardIndex, failure);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (scrollId.getContext().length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
|
||||
ScrollIdForNode[] context = scrollId.getContext();
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node != null) {
|
||||
executePhase(i, node, target.getScrollId());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (ScrollIdForNode target : scrollId.getContext()) {
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node == null) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(ScrollQueryFetchSearchResult result) {
|
||||
queryFetchResults.set(shardIndex, result.result());
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
onPhaseFailure(t, searchId, shardIndex);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, searchId);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(t));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
|
||||
} else {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
try {
|
||||
innerFinishHim();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
private void innerFinishHim() throws Exception {
|
||||
ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
|
||||
queryFetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
}
|
||||
}
|
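The scroll async action above collects shard failures lazily and on a best-effort basis: the failure array is only allocated once the first failure arrives, and buildShardFailures() simply copies out whatever has been recorded. The sketch below mirrors that trade-off with plain JDK types and hypothetical names; like the original, it deliberately tolerates the benign allocation race rather than adding synchronization.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Hypothetical, JDK-only stand-ins for the lazy, best-effort failure collection above.
final class ShardFailureSketch {

    private final int shardCount;
    private volatile AtomicReferenceArray<String> shardFailures; // stays null until the first failure

    ShardFailureSketch(int shardCount) {
        this.shardCount = shardCount;
    }

    void addShardFailure(int shardIndex, String failure) {
        if (shardFailures == null) {
            // benign race: two threads may both allocate; the original code accepts the
            // same "not fully concurrently safe" trade-off and keeps whatever was recorded
            shardFailures = new AtomicReferenceArray<>(shardCount);
        }
        shardFailures.set(shardIndex, failure);
    }

    List<String> buildShardFailures() {
        AtomicReferenceArray<String> failures = shardFailures;
        if (failures == null) {
            return Collections.emptyList(); // fast path: nothing ever failed
        }
        List<String> copied = new ArrayList<>();
        for (int i = 0; i < failures.length(); i++) {
            if (failures.get(i) != null) {
                copied.add(failures.get(i));
            }
        }
        return copied;
    }

    public static void main(String[] args) {
        ShardFailureSketch sketch = new ShardFailureSketch(3);
        sketch.addShardFailure(1, "node left the cluster");
        System.out.println(sketch.buildShardFailures());
    }
}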
@ -1,255 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.SearchScrollRequest;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchRequest;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent {
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final SearchServiceTransportAction searchService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchScrollQueryThenFetchAction(Settings settings, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
|
||||
super(settings);
|
||||
this.clusterService = clusterService;
|
||||
this.searchService = searchService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
}
|
||||
|
||||
public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
new AsyncAction(request, scrollId, listener).start();
|
||||
}
|
||||
|
||||
private class AsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final SearchScrollRequest request;
|
||||
|
||||
private final ActionListener<SearchResponse> listener;
|
||||
|
||||
private final ParsedScrollId scrollId;
|
||||
|
||||
private final DiscoveryNodes nodes;
|
||||
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
|
||||
private volatile ScoreDoc[] sortedShardList;
|
||||
|
||||
private final AtomicInteger successfulOps;
|
||||
|
||||
private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
this.scrollId = scrollId;
|
||||
this.nodes = clusterService.state().nodes();
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
|
||||
this.queryResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
shardFailures.set(shardIndex, failure);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (scrollId.getContext().length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);
|
||||
|
||||
ScrollIdForNode[] context = scrollId.getContext();
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node != null) {
|
||||
executeQueryPhase(i, counter, node, target.getScrollId());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
|
||||
@Override
|
||||
public void onResponse(ScrollQuerySearchResult result) {
|
||||
queryResults.set(shardIndex, result.queryResult());
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
onQueryPhaseFailure(shardIndex, counter, searchId, t);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] Failed to execute query phase", t, searchId);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(t));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
|
||||
} else {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeFetchPhase() throws Exception {
|
||||
sortedShardList = searchPhaseController.sortDocs(true, queryResults);
|
||||
AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
|
||||
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
|
||||
|
||||
if (docIdsToLoad.asList().isEmpty()) {
|
||||
finishHim();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
|
||||
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
|
||||
IntArrayList docIds = entry.value;
|
||||
final QuerySearchResult querySearchResult = queryResults.get(entry.index);
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
|
||||
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
|
||||
searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(querySearchResult.shardTarget());
|
||||
fetchResults.set(entry.index, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Failed to execute fetch phase", t);
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
try {
|
||||
innerFinishHim();
|
||||
} catch (Throwable e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
private void innerFinishHim() {
|
||||
InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
}
|
||||
}
|
||||
}
|
@ -1,406 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search.type;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchAction;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
import org.elasticsearch.action.support.TransportActions;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchServiceTransportAction;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.tasks.TaskManager;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.type.TransportSearchHelper.internalSearchRequest;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> {
|
||||
|
||||
protected final ClusterService clusterService;
|
||||
|
||||
protected final SearchServiceTransportAction searchService;
|
||||
|
||||
protected final SearchPhaseController searchPhaseController;
|
||||
|
||||
public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, SearchAction.NAME, threadPool, actionFilters, indexNameExpressionResolver, clusterService.getTaskManager());
|
||||
this.clusterService = clusterService;
|
||||
this.searchService = searchService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
}
|
||||
|
||||
protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
|
||||
|
||||
protected final ActionListener<SearchResponse> listener;
|
||||
|
||||
protected final GroupShardsIterator shardsIts;
|
||||
|
||||
protected final SearchRequest request;
|
||||
|
||||
protected final ClusterState clusterState;
|
||||
protected final DiscoveryNodes nodes;
|
||||
|
||||
protected final int expectedSuccessfulOps;
|
||||
private final int expectedTotalOps;
|
||||
|
||||
protected final AtomicInteger successfulOps = new AtomicInteger();
|
||||
private final AtomicInteger totalOps = new AtomicInteger();
|
||||
|
||||
protected final AtomicArray<FirstResult> firstResults;
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
private final Object shardFailuresMutex = new Object();
|
||||
protected volatile ScoreDoc[] sortedShardList;
|
||||
|
||||
protected BaseAsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
|
||||
this.clusterState = clusterService.state();
|
||||
nodes = clusterState.nodes();
|
||||
|
||||
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
|
||||
|
||||
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
|
||||
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
|
||||
// of just for the _search api
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(), startTime(), request.indices());
|
||||
|
||||
for (String index : concreteIndices) {
|
||||
clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
|
||||
}
|
||||
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
|
||||
|
||||
shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
|
||||
expectedSuccessfulOps = shardsIts.size();
|
||||
// we need to add 1 for non active partition, since we count it in the total!
|
||||
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
|
||||
|
||||
firstResults = new AtomicArray<>(shardsIts.size());
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (expectedSuccessfulOps == 0) {
|
||||
// no search shards to search on, bail with empty response (it happens with search across _all with no indices around and consistent with broadcast operations)
|
||||
listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(), ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
int shardIndex = -1;
|
||||
for (final ShardIterator shardIt : shardsIts) {
|
||||
shardIndex++;
|
||||
final ShardRouting shard = shardIt.nextOrNull();
|
||||
if (shard != null) {
|
||||
performFirstPhase(shardIndex, shardIt, shard);
|
||||
} else {
|
||||
// really, no shards active in this group
|
||||
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
|
||||
if (shard == null) {
|
||||
// no more active shards... (we should not really get here, but just for safety)
|
||||
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
} else {
|
||||
final DiscoveryNode node = nodes.get(shard.currentNodeId());
|
||||
if (node == null) {
|
||||
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
} else {
|
||||
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, shard.index().getName(), request.indices());
|
||||
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener<FirstResult>() {
|
||||
@Override
|
||||
public void onResponse(FirstResult result) {
|
||||
onFirstPhaseResult(shardIndex, shard, result, shardIt);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
|
||||
result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
|
||||
processFirstPhaseResult(shardIndex, result);
|
||||
// we need to increment successful ops first before we compare the exit condition, otherwise if we
// are fast we could concurrently update totalOps but then preempt one of the threads, which can
// cause the successor to read a wrong value from successfulOps if the second phase is very fast, i.e. count etc.
successfulOps.incrementAndGet();
// increment all the "future" shards to update the total ops, since some may work and some may not...
// and when that happens, we break on total ops, so we must maintain them
final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
|
||||
if (xTotalOps == expectedTotalOps) {
|
||||
try {
|
||||
innerMoveToSecondPhase();
|
||||
} catch (Throwable e) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
|
||||
}
|
||||
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
|
||||
}
|
||||
} else if (xTotalOps > expectedTotalOps) {
|
||||
raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]"));
|
||||
}
|
||||
}
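// The ordering rules described in the comments above reduce to a single atomic countdown that must
// fire exactly once: a success bumps successfulOps before the totalOps comparison, and each result
// folds the shard copies that will never be tried into totalOps in one addAndGet. A minimal,
// self-contained sketch of that pattern (illustrative names, not the Elasticsearch classes):
class PhaseCountdownSketch {
    private final int expectedTotalOps;                  // every shard copy, with 1 counted for empty groups
    private final java.util.concurrent.atomic.AtomicInteger successfulOps = new java.util.concurrent.atomic.AtomicInteger();
    private final java.util.concurrent.atomic.AtomicInteger totalOps = new java.util.concurrent.atomic.AtomicInteger();
    private final Runnable secondPhase;

    PhaseCountdownSketch(int expectedTotalOps, Runnable secondPhase) {
        this.expectedTotalOps = expectedTotalOps;
        this.secondPhase = secondPhase;
    }

    // successful first-phase result; remaining = copies left on that shard's iterator
    void onResult(int remaining) {
        successfulOps.incrementAndGet();                 // must happen before the totalOps comparison
        int total = totalOps.addAndGet(remaining + 1);   // this copy plus the copies we will never try
        if (total == expectedTotalOps) {
            secondPhase.run();                           // reached exactly once across all threads
        } else if (total > expectedTotalOps) {
            throw new IllegalStateException("total ops [" + total + "] exceeded expected [" + expectedTotalOps + "]");
        }
    }

    // failure of a single shard copy; the next copy on the iterator is retried separately
    void onShardFailure() {
        if (totalOps.incrementAndGet() == expectedTotalOps) {
            secondPhase.run();
        }
    }
}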
void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, final ShardIterator shardIt, Throwable t) {
|
||||
// we always add the shard failure for a specific shard instance
|
||||
// we do make sure to clean it on a successful response from a shard
|
||||
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
|
||||
addShardFailure(shardIndex, shardTarget, t);
|
||||
|
||||
if (totalOps.incrementAndGet() == expectedTotalOps) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
|
||||
if (shard != null) {
|
||||
logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
|
||||
} else {
|
||||
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
|
||||
}
|
||||
} else if (logger.isTraceEnabled()) {
|
||||
logger.trace("{}: Failed to execute [{}]", t, shard, request);
|
||||
}
|
||||
}
|
||||
final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
|
||||
if (successfulOps.get() == 0) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
|
||||
}
|
||||
|
||||
// no successful ops, raise an exception
|
||||
raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
|
||||
} else {
|
||||
try {
|
||||
innerMoveToSecondPhase();
|
||||
} catch (Throwable e) {
|
||||
raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
final ShardRouting nextShard = shardIt.nextOrNull();
|
||||
final boolean lastShard = nextShard == null;
|
||||
// trace log this exception
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
|
||||
}
|
||||
if (!lastShard) {
|
||||
try {
|
||||
performFirstPhase(shardIndex, shardIt, nextShard);
|
||||
} catch (Throwable t1) {
|
||||
onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
|
||||
}
|
||||
} else {
|
||||
// no more shards active, add a failure
|
||||
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
|
||||
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
|
||||
logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, boolean lastShard) {
|
||||
if (shard != null) {
|
||||
return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
|
||||
} else {
|
||||
return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
|
||||
}
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
}
|
||||
|
||||
protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
|
||||
// we don't aggregate shard failures on non active shards (but do keep the header counts right)
|
||||
if (TransportActions.isShardNotAvailableException(t)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
|
||||
if (shardFailures == null) {
|
||||
synchronized (shardFailuresMutex) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(shardsIts.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
ShardSearchFailure failure = shardFailures.get(shardIndex);
|
||||
if (failure == null) {
|
||||
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
|
||||
} else {
|
||||
// the failure is already present, try and not override it with an exception that is less meaningless
|
||||
// for example, getting illegal shard state
|
||||
if (TransportActions.isReadOverrideException(t)) {
|
||||
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
|
||||
}
|
||||
}
|
||||
}
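// The lazy creation above is the classic safe-publication pattern: a volatile field checked twice,
// with the second check inside a mutex, so the failure array is only allocated on the first failure
// and every thread then sees the same instance. A stripped-down sketch with illustrative names
// (the real code uses Elasticsearch's AtomicArray rather than AtomicReferenceArray):
class LazyFailuresSketch {
    private volatile java.util.concurrent.atomic.AtomicReferenceArray<Throwable> failures;  // null until the first failure
    private final Object mutex = new Object();

    void addFailure(int shardIndex, int shardCount, Throwable t) {
        if (failures == null) {                              // cheap volatile read on the common, failure-free path
            synchronized (mutex) {
                if (failures == null) {                      // re-check under the lock before allocating
                    failures = new java.util.concurrent.atomic.AtomicReferenceArray<>(shardCount);
                }
            }
        }
        failures.set(shardIndex, t);
    }
}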
|
||||
|
||||
private void raiseEarlyFailure(Throwable t) {
|
||||
for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
|
||||
try {
|
||||
DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
|
||||
sendReleaseSearchContext(entry.value.id(), node);
|
||||
} catch (Throwable t1) {
|
||||
logger.trace("failed to release context", t1);
|
||||
}
|
||||
}
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
||||
/**
 * Releases the search contexts of shards whose results are not used in docIdsToLoad.
 */
|
||||
protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
|
||||
AtomicArray<IntArrayList> docIdsToLoad) {
|
||||
if (docIdsToLoad == null) {
|
||||
return;
|
||||
}
|
||||
// we only release search context that we did not fetch from if we are not scrolling
|
||||
if (request.scroll() == null) {
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
|
||||
final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
|
||||
if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
|
||||
&& docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
|
||||
try {
|
||||
DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
|
||||
sendReleaseSearchContext(entry.value.queryResult().id(), node);
|
||||
} catch (Throwable t1) {
|
||||
logger.trace("failed to release context", t1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
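// The release rule above can be read as a single predicate: free a shard's search context only when
// the request is not a scroll, the shard did produce hits, and none of those hits made it into the
// global top docs. A hedged, self-contained restatement with illustrative parameter names:
final class ContextReleaseRuleSketch {
    static boolean shouldRelease(boolean scrollRequest, int shardHitCount, boolean contributedToGlobalTopDocs) {
        return !scrollRequest && shardHitCount > 0 && !contributedToGlobalTopDocs;
    }
}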
|
||||
|
||||
protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
|
||||
if (node != null) {
|
||||
searchService.sendFreeContext(node, contextId, request);
|
||||
}
|
||||
}
|
||||
|
||||
protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry, ScoreDoc[] lastEmittedDocPerShard) {
|
||||
if (lastEmittedDocPerShard != null) {
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
|
||||
return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
|
||||
} else {
|
||||
return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<FirstResult> listener);
|
||||
|
||||
protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
|
||||
firstResults.set(shardIndex, result);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
|
||||
}
|
||||
|
||||
// clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
|
||||
// so its ok concurrency wise to miss potentially the shard failures being created because of another failure
|
||||
// in the #addShardFailure, because by definition, it will happen on *another* shardIndex
|
||||
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
|
||||
if (shardFailures != null) {
|
||||
shardFailures.set(shardIndex, null);
|
||||
}
|
||||
}
|
||||
|
||||
final void innerMoveToSecondPhase() throws Exception {
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
boolean hadOne = false;
|
||||
for (int i = 0; i < firstResults.length(); i++) {
|
||||
FirstResult result = firstResults.get(i);
|
||||
if (result == null) {
|
||||
continue; // failure
|
||||
}
|
||||
if (hadOne) {
|
||||
sb.append(",");
|
||||
} else {
|
||||
hadOne = true;
|
||||
}
|
||||
sb.append(result.shardTarget());
|
||||
}
|
||||
|
||||
logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
|
||||
}
|
||||
moveToSecondPhase();
|
||||
}
|
||||
|
||||
protected abstract void moveToSecondPhase() throws Exception;
|
||||
|
||||
protected abstract String firstPhaseName();
|
||||
}
}
|
@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
|
||||
throw new IllegalArgumentException("suggest content missing");
|
||||
}
|
||||
final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
|
||||
indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id());
|
||||
indexService.fieldData(), request.shardId());
|
||||
final Suggest result = suggestPhase.execute(context, searcher.searcher());
|
||||
return new ShardSuggestResponse(request.shardId(), result);
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
@ -31,40 +32,35 @@ import java.io.IOException;
|
||||
*/
|
||||
public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> extends ActionRequest<Request> {
|
||||
|
||||
private String parentTaskNode;
|
||||
|
||||
private long parentTaskId;
|
||||
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
|
||||
|
||||
protected ChildTaskActionRequest() {
|
||||
|
||||
}
|
||||
|
||||
public void setParentTask(String parentTaskNode, long parentTaskId) {
|
||||
this.parentTaskNode = parentTaskNode;
|
||||
this.parentTaskId = parentTaskId;
|
||||
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
parentTaskNode = in.readOptionalString();
|
||||
parentTaskId = in.readLong();
|
||||
parentTaskId = new TaskId(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeOptionalString(parentTaskNode);
|
||||
out.writeLong(parentTaskId);
|
||||
parentTaskId.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Task createTask(long id, String type, String action) {
|
||||
return createTask(id, type, action, parentTaskNode, parentTaskId);
|
||||
return createTask(id, type, action, parentTaskId);
|
||||
}
|
||||
|
||||
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
|
||||
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
|
||||
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
|
||||
return new Task(id, type, action, getDescription(), parentTaskId);
|
||||
}
|
||||
|
||||
}
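// The change above collapses the (parentTaskNode, parentTaskId) pair into one TaskId value object
// that knows how to stream itself. A self-contained sketch of that idea, assuming a simplified wire
// format of "node string + long id" (the real TaskId's encoding and stream classes differ):
final class TaskIdSketch {
    final String nodeId;                                          // "" stands in for "no parent" here
    final long id;

    TaskIdSketch(String nodeId, long id) { this.nodeId = nodeId; this.id = id; }

    static final TaskIdSketch EMPTY = new TaskIdSketch("", -1L);  // plays the role of TaskId.EMPTY_TASK_ID

    void writeTo(java.io.DataOutput out) throws java.io.IOException {
        out.writeUTF(nodeId);
        out.writeLong(id);
    }

    static TaskIdSketch readFrom(java.io.DataInput in) throws java.io.IOException {
        return new TaskIdSketch(in.readUTF(), in.readLong());
    }
}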
|
||||
|
@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -31,38 +32,33 @@ import java.io.IOException;
|
||||
*/
|
||||
public class ChildTaskRequest extends TransportRequest {
|
||||
|
||||
private String parentTaskNode;
|
||||
|
||||
private long parentTaskId;
|
||||
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
|
||||
|
||||
protected ChildTaskRequest() {
|
||||
}
|
||||
|
||||
public void setParentTask(String parentTaskNode, long parentTaskId) {
|
||||
this.parentTaskNode = parentTaskNode;
|
||||
this.parentTaskId = parentTaskId;
|
||||
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
parentTaskNode = in.readOptionalString();
|
||||
parentTaskId = in.readLong();
|
||||
parentTaskId = new TaskId(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeOptionalString(parentTaskNode);
|
||||
out.writeLong(parentTaskId);
|
||||
parentTaskId.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Task createTask(long id, String type, String action) {
|
||||
return createTask(id, type, action, parentTaskNode, parentTaskId);
|
||||
return createTask(id, type, action, parentTaskId);
|
||||
}
|
||||
|
||||
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
|
||||
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
|
||||
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
|
||||
return new Task(id, type, action, getDescription(), parentTaskId);
|
||||
}
|
||||
}
|
||||
|
@ -30,6 +30,7 @@ import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskListener;
|
||||
import org.elasticsearch.tasks.TaskManager;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
@ -72,6 +73,13 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
|
||||
* This is a typical behavior.
|
||||
*/
|
||||
public final Task execute(Request request, ActionListener<Response> listener) {
|
||||
/*
|
||||
* While this version of execute could delegate to the TaskListener
|
||||
* version of execute that'd add yet another layer of wrapping on the
|
||||
* listener and prevent us from using the listener bare if there isn't a
|
||||
* task. That just seems like too many objects. Thus the two versions of
|
||||
* this method.
|
||||
*/
|
||||
Task task = taskManager.register("transport", actionName, request);
|
||||
if (task == null) {
|
||||
execute(null, request, listener);
|
||||
@ -93,11 +101,32 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
|
||||
return task;
|
||||
}
|
||||
|
||||
public final Task execute(Request request, TaskListener<Response> listener) {
|
||||
Task task = taskManager.register("transport", actionName, request);
|
||||
execute(task, request, new ActionListener<Response>() {
|
||||
@Override
|
||||
public void onResponse(Response response) {
|
||||
if (task != null) {
|
||||
taskManager.unregister(task);
|
||||
}
|
||||
listener.onResponse(task, response);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
if (task != null) {
|
||||
taskManager.unregister(task);
|
||||
}
|
||||
listener.onFailure(task, e);
|
||||
}
|
||||
});
|
||||
return task;
|
||||
}
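// Both callbacks above unregister the task before delegating, so a registered task never outlives
// its request regardless of outcome. The same wrapping, isolated with illustrative interfaces
// (not the Elasticsearch listener types):
final class TaskUnregisterWrapSketch {
    interface ActionListener<R> { void onResponse(R response); void onFailure(Throwable t); }
    interface TaskListener<R> { void onResponse(String task, R response); void onFailure(String task, Throwable t); }

    static <R> ActionListener<R> unregisterThenNotify(String task, Runnable unregister, TaskListener<R> delegate) {
        return new ActionListener<R>() {
            @Override public void onResponse(R response) { unregister.run(); delegate.onResponse(task, response); }
            @Override public void onFailure(Throwable t) { unregister.run(); delegate.onFailure(task, t); }
        };
    }
}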
|
||||
|
||||
/**
|
||||
* Use this method when the transport action should continue to run in the context of the current task
|
||||
*/
|
||||
public final void execute(Task task, Request request, ActionListener<Response> listener) {
|
||||
|
||||
ActionRequestValidationException validationException = request.validate();
|
||||
if (validationException != null) {
|
||||
listener.onFailure(validationException);
|
||||
|
@ -110,7 +110,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
}
|
||||
|
||||
|
||||
private class AsyncAction {
|
||||
class AsyncAction {
|
||||
|
||||
private final NodesRequest request;
|
||||
private final String[] nodesIds;
|
||||
@ -120,7 +120,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
private final AtomicInteger counter = new AtomicInteger();
|
||||
private final Task task;
|
||||
|
||||
private AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
|
||||
AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
|
||||
this.task = task;
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
@ -135,7 +135,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
|
||||
}
|
||||
|
||||
private void start() {
|
||||
void start() {
|
||||
if (nodesIds.length == 0) {
|
||||
// nothing to notify
|
||||
threadPool.generic().execute(new Runnable() {
|
||||
@ -158,11 +158,6 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
try {
|
||||
if (node == null) {
|
||||
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
||||
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
|
||||
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
|
||||
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
|
||||
// those (and they randomize the client node usage, so tricky to find when)
|
||||
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
|
||||
} else {
|
||||
ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
|
||||
if (task != null) {
|
||||
|
@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
@ -216,8 +217,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
|
||||
}
|
||||
|
||||
@Override
|
||||
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
|
||||
return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
|
||||
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
|
||||
return new ReplicationTask(id, type, action, getDescription(), parentTaskId);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -19,11 +19,11 @@
|
||||
|
||||
package org.elasticsearch.action.support.replication;
|
||||
|
||||
import org.elasticsearch.common.inject.Provider;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
@ -35,8 +35,8 @@ import static java.util.Objects.requireNonNull;
|
||||
public class ReplicationTask extends Task {
|
||||
private volatile String phase = "starting";
|
||||
|
||||
public ReplicationTask(long id, String type, String action, String description, String parentNode, long parentId) {
|
||||
super(id, type, action, description, parentNode, parentId);
|
||||
public ReplicationTask(long id, String type, String action, String description, TaskId parentTaskId) {
|
||||
super(id, type, action, description, parentTaskId);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.engine.VersionConflictEngineException;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
@ -83,6 +84,7 @@ import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
@ -371,18 +373,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
}
|
||||
|
||||
private void failReplicaIfNeeded(Throwable t) {
|
||||
String index = request.shardId().getIndex().getName();
|
||||
Index index = request.shardId().getIndex();
|
||||
int shardId = request.shardId().id();
|
||||
logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
|
||||
if (ignoreReplicaException(t) == false) {
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
|
||||
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
|
||||
return;
|
||||
}
|
||||
IndexShard indexShard = indexService.getShardOrNull(shardId);
|
||||
if (indexShard == null) {
|
||||
logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
|
||||
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
|
||||
return;
|
||||
}
|
||||
indexShard.failShard(actionName + " failed on replica", t);
|
||||
@ -461,60 +463,90 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
protected void doRun() {
|
||||
setPhase(task, "routing");
|
||||
final ClusterState state = observer.observedState();
|
||||
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
|
||||
if (blockException != null) {
|
||||
handleBlockException(blockException);
|
||||
return;
|
||||
}
|
||||
final String concreteIndex = resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
|
||||
blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex);
|
||||
if (blockException != null) {
|
||||
handleBlockException(blockException);
|
||||
if (handleBlockExceptions(state)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// request does not have a shardId yet, we need to pass the concrete index to resolve shardId
|
||||
final String concreteIndex = concreteIndex(state);
|
||||
resolveRequest(state.metaData(), concreteIndex, request);
|
||||
assert request.shardId() != null : "request shardId must be set in resolveRequest";
|
||||
|
||||
IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId());
|
||||
final ShardRouting primary = indexShard.primaryShard();
|
||||
if (primary == null || primary.active() == false) {
|
||||
logger.trace("primary shard [{}] is not yet active, scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), actionName, request, state.version());
|
||||
retryBecauseUnavailable(request.shardId(), "primary shard is not active");
|
||||
return;
|
||||
}
|
||||
if (state.nodes().nodeExists(primary.currentNodeId()) == false) {
|
||||
logger.trace("primary shard [{}] is assigned to an unknown node [{}], scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), primary.currentNodeId(), actionName, request, state.version());
|
||||
retryBecauseUnavailable(request.shardId(), "primary shard isn't assigned to a known node.");
|
||||
final ShardRouting primary = primary(state);
|
||||
if (retryIfUnavailable(state, primary)) {
|
||||
return;
|
||||
}
|
||||
final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
|
||||
taskManager.registerChildTask(task, node.getId());
|
||||
if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
|
||||
setPhase(task, "waiting_on_primary");
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
|
||||
}
|
||||
performAction(node, transportPrimaryAction, true);
|
||||
performLocalAction(state, primary, node);
|
||||
} else {
|
||||
if (state.version() < request.routedBasedOnClusterVersion()) {
|
||||
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
|
||||
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
|
||||
return;
|
||||
} else {
|
||||
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
|
||||
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
|
||||
request.routedBasedOnClusterVersion(state.version());
|
||||
}
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
|
||||
}
|
||||
setPhase(task, "rerouted");
|
||||
performAction(node, actionName, false);
|
||||
performRemoteAction(state, primary, node);
|
||||
}
|
||||
}
|
||||
|
||||
private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
|
||||
setPhase(task, "waiting_on_primary");
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
|
||||
}
|
||||
performAction(node, transportPrimaryAction, true);
|
||||
}
|
||||
|
||||
private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
|
||||
if (state.version() < request.routedBasedOnClusterVersion()) {
|
||||
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
|
||||
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
|
||||
return;
|
||||
} else {
|
||||
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
|
||||
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
|
||||
request.routedBasedOnClusterVersion(state.version());
|
||||
}
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
|
||||
}
|
||||
setPhase(task, "rerouted");
|
||||
performAction(node, actionName, false);
|
||||
}
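// The version guard above is the loop-breaking mechanism: a node only re-forwards the request if its
// own cluster state is at least as new as the state the sender routed on, otherwise it schedules a
// retry and waits for a newer state. A minimal restatement with illustrative names:
final class RoutingVersionGuardSketch {
    enum Decision { RETRY_AFTER_NEWER_STATE, FORWARD_TO_PRIMARY_NODE }

    static Decision decide(long localClusterStateVersion, long routedBasedOnClusterVersion) {
        if (localClusterStateVersion < routedBasedOnClusterVersion) {
            return Decision.RETRY_AFTER_NEWER_STATE;      // stale view: do not bounce the request back
        }
        return Decision.FORWARD_TO_PRIMARY_NODE;          // caught up: safe to take the second hop
    }
}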
private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) {
|
||||
if (primary == null || primary.active() == false) {
|
||||
logger.trace("primary shard [{}] is not yet active, scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), actionName, request, state.version());
|
||||
retryBecauseUnavailable(request.shardId(), "primary shard is not active");
|
||||
return true;
|
||||
}
|
||||
if (state.nodes().nodeExists(primary.currentNodeId()) == false) {
|
||||
logger.trace("primary shard [{}] is assigned to an unknown node [{}], scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), primary.currentNodeId(), actionName, request, state.version());
|
||||
retryBecauseUnavailable(request.shardId(), "primary shard isn't assigned to a known node.");
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private String concreteIndex(ClusterState state) {
|
||||
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
|
||||
}
|
||||
|
||||
private ShardRouting primary(ClusterState state) {
|
||||
IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId());
|
||||
return indexShard.primaryShard();
|
||||
}
|
||||
|
||||
private boolean handleBlockExceptions(ClusterState state) {
|
||||
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
|
||||
if (blockException != null) {
|
||||
handleBlockException(blockException);
|
||||
return true;
|
||||
}
|
||||
blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex(state));
|
||||
if (blockException != null) {
|
||||
handleBlockException(blockException);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private void handleBlockException(ClusterBlockException blockException) {
|
||||
if (blockException.retryable()) {
|
||||
logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
|
||||
@ -677,28 +709,35 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
// closed in finishAsFailed(e) in the case of error
|
||||
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
|
||||
if (indexShardReference.isRelocated() == false) {
|
||||
// execute locally
|
||||
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
|
||||
primaryResponse.v2().primaryTerm(indexShardReference.opPrimaryTerm());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
|
||||
}
|
||||
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
|
||||
finishAndMoveToReplication(replicationPhase);
|
||||
executeLocally();
|
||||
} else {
|
||||
// delegate primary phase to relocation target
|
||||
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
|
||||
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
|
||||
final ShardRouting primary = indexShardReference.routingEntry();
|
||||
indexShardReference.close();
|
||||
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
|
||||
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
|
||||
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
|
||||
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
|
||||
"rerouting indexing to target primary " + primary));
|
||||
executeRemotely();
|
||||
}
|
||||
}
|
||||
|
||||
private void executeLocally() throws Exception {
|
||||
// execute locally
|
||||
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
|
||||
}
|
||||
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
|
||||
finishAndMoveToReplication(replicationPhase);
|
||||
}
|
||||
|
||||
private void executeRemotely() {
|
||||
// delegate primary phase to relocation target
|
||||
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
|
||||
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
|
||||
final ShardRouting primary = indexShardReference.routingEntry();
|
||||
indexShardReference.close();
|
||||
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
|
||||
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
|
||||
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
|
||||
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
|
||||
"rerouting indexing to target primary " + primary));
|
||||
}
|
||||
|
||||
/**
|
||||
* checks whether we can perform a write based on the write consistency setting
|
||||
* returns *null* if it is OK to proceed, or a string describing the reason to stop
|
||||
@ -836,23 +875,48 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
|
||||
// If the index gets deleted after primary operation, we skip replication
|
||||
final ClusterState state = clusterService.state();
|
||||
final IndexRoutingTable index = state.getRoutingTable().index(shardId.getIndex());
|
||||
final IndexShardRoutingTable shardRoutingTable = (index != null) ? index.shard(shardId.id()) : null;
|
||||
final IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTableOrNull(shardId);
|
||||
final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
|
||||
this.shards = (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
|
||||
this.executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
|
||||
this.nodes = state.getNodes();
|
||||
List<ShardRouting> shards = shards(shardRoutingTable);
|
||||
boolean executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
|
||||
DiscoveryNodes nodes = state.getNodes();
|
||||
|
||||
if (shards.isEmpty()) {
|
||||
logger.debug("replication phase for request [{}] on [{}] is skipped due to index deletion after primary operation", replicaRequest, shardId);
|
||||
}
|
||||
|
||||
// we calculate number of target nodes to send replication operations, including nodes with relocating shards
|
||||
AtomicInteger numberOfPendingShardInstances = new AtomicInteger();
|
||||
this.totalShards = countTotalAndPending(shards, executeOnReplica, nodes, numberOfPendingShardInstances);
|
||||
this.pending = numberOfPendingShardInstances;
|
||||
this.shards = shards;
|
||||
this.executeOnReplica = executeOnReplica;
|
||||
this.nodes = nodes;
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
|
||||
transportReplicaAction, replicaRequest, state.version());
|
||||
}
|
||||
}
|
||||
|
||||
private int countTotalAndPending(List<ShardRouting> shards, boolean executeOnReplica, DiscoveryNodes nodes, AtomicInteger pending) {
|
||||
assert pending.get() == 0;
|
||||
int numberOfIgnoredShardInstances = performOnShards(shards, executeOnReplica, nodes, shard -> pending.incrementAndGet(), shard -> pending.incrementAndGet());
|
||||
// one for the local primary copy
|
||||
return 1 + numberOfIgnoredShardInstances + pending.get();
|
||||
}
|
||||
|
||||
private int performOnShards(List<ShardRouting> shards, boolean executeOnReplica, DiscoveryNodes nodes, Consumer<ShardRouting> onLocalShard, Consumer<ShardRouting> onRelocatingShard) {
|
||||
int numberOfIgnoredShardInstances = 0;
|
||||
int numberOfPendingShardInstances = 0;
|
||||
for (ShardRouting shard : shards) {
|
||||
// the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
|
||||
if (shard.primary() == false && executeOnReplica == false) {
|
||||
// If the replicas use shadow replicas, there is no reason to
|
||||
// perform the action on the replica, so skip it and
|
||||
// immediately return
|
||||
|
||||
// this delays mapping updates on replicas because they have
|
||||
// to wait until they get the new mapping through the cluster
|
||||
// state, which is why we recommend pre-defined mappings for
|
||||
// indices using shadow replicas
|
||||
numberOfIgnoredShardInstances++;
|
||||
continue;
|
||||
}
|
||||
@ -860,20 +924,26 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
numberOfIgnoredShardInstances++;
|
||||
continue;
|
||||
}
|
||||
// we index on a replica that is initializing as well since we might not have got the event
|
||||
// yet that it was started. We will get an exception IllegalShardState exception if its not started
|
||||
// and that's fine, we will ignore it
|
||||
|
||||
// we never execute replication operation locally as primary operation has already completed locally
|
||||
// hence, we ignore any local shard for replication
|
||||
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
|
||||
numberOfPendingShardInstances++;
|
||||
onLocalShard.accept(shard);
|
||||
}
|
||||
// send operation to relocating shard
|
||||
// local shard can be a relocation target of a primary that is in relocated state
|
||||
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
|
||||
numberOfPendingShardInstances++;
|
||||
onRelocatingShard.accept(shard);
|
||||
}
|
||||
}
|
||||
// one for the local primary copy
|
||||
this.totalShards = 1 + numberOfPendingShardInstances + numberOfIgnoredShardInstances;
|
||||
this.pending = new AtomicInteger(numberOfPendingShardInstances);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
|
||||
transportReplicaAction, replicaRequest, state.version());
|
||||
}
|
||||
return numberOfIgnoredShardInstances;
|
||||
}
|
||||
|
||||
private List<ShardRouting> shards(IndexShardRoutingTable shardRoutingTable) {
|
||||
return (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -913,36 +983,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
doFinish();
|
||||
return;
|
||||
}
|
||||
for (ShardRouting shard : shards) {
|
||||
if (shard.primary() == false && executeOnReplica == false) {
|
||||
// If the replicas use shadow replicas, there is no reason to
|
||||
// perform the action on the replica, so skip it and
|
||||
// immediately return
|
||||
|
||||
// this delays mapping updates on replicas because they have
|
||||
// to wait until they get the new mapping through the cluster
|
||||
// state, which is why we recommend pre-defined mappings for
|
||||
// indices using shadow replicas
|
||||
continue;
|
||||
}
|
||||
if (shard.unassigned()) {
|
||||
continue;
|
||||
}
|
||||
// we index on a replica that is initializing as well since we might not have got the event
|
||||
// yet that it was started. We will get an exception IllegalShardState exception if its not started
|
||||
// and that's fine, we will ignore it
|
||||
|
||||
// we never execute replication operation locally as primary operation has already completed locally
|
||||
// hence, we ignore any local shard for replication
|
||||
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
|
||||
performOnReplica(shard);
|
||||
}
|
||||
// send operation to relocating shard
|
||||
// local shard can be a relocation target of a primary that is in relocated state
|
||||
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
|
||||
performOnReplica(shard.buildTargetRelocatingShard());
|
||||
}
|
||||
}
|
||||
performOnShards(shards, executeOnReplica, nodes, shard -> performOnReplica(shard), shard -> performOnReplica(shard.buildTargetRelocatingShard()));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
@ -42,8 +43,8 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
|
||||
protected TimeValue timeout = DEFAULT_TIMEOUT;
|
||||
|
||||
protected String index;
|
||||
// -1 means its not set, allows to explicitly direct a request to a specific shard
|
||||
protected int shardId = -1;
|
||||
// null means its not set, allows to explicitly direct a request to a specific shard
|
||||
protected ShardId shardId = null;
|
||||
|
||||
private String concreteIndex;
|
||||
|
||||
@ -115,7 +116,11 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
index = in.readString();
|
||||
shardId = in.readInt();
|
||||
if (in.readBoolean()) {
|
||||
shardId = ShardId.readShardId(in);
|
||||
} else {
|
||||
shardId = null;
|
||||
}
|
||||
timeout = TimeValue.readTimeValue(in);
|
||||
concreteIndex = in.readOptionalString();
|
||||
}
|
||||
@ -124,7 +129,7 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(index);
|
||||
out.writeInt(shardId);
|
||||
out.writeOptionalStreamable(shardId);
|
||||
timeout.writeTo(out);
|
||||
out.writeOptionalString(concreteIndex);
|
||||
}
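// The readFrom/writeTo change above switches the shard id from a raw int to an optional value object
// on the wire: a presence flag, then the value only when present. A self-contained sketch of that
// convention using plain java.io streams (the real StreamInput/StreamOutput classes differ):
final class OptionalShardIdWireSketch {
    static final class ShardIdSketch {
        final String index; final int id;
        ShardIdSketch(String index, int id) { this.index = index; this.id = id; }
    }

    static void writeOptional(java.io.DataOutput out, ShardIdSketch shardId) throws java.io.IOException {
        out.writeBoolean(shardId != null);
        if (shardId != null) {
            out.writeUTF(shardId.index);
            out.writeInt(shardId.id);
        }
    }

    static ShardIdSketch readOptional(java.io.DataInput in) throws java.io.IOException {
        if (in.readBoolean()) {
            return new ShardIdSketch(in.readUTF(), in.readInt());
        }
        return null;                      // mirrors "shardId = null" when the presence flag is false
    }
}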
|
||||
|
@ -35,10 +35,8 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.node.NodeClosedException;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.BaseTransportResponseHandler;
|
||||
@ -91,11 +89,11 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
|
||||
protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) {
|
||||
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex());
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the request. If the resolve means a different execution, then return false
|
||||
* here to indicate not to continue and execute this request.
|
||||
* Resolves the request. Throws an exception if the request cannot be resolved.
|
||||
*/
|
||||
protected abstract boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener);
|
||||
protected abstract void resolveRequest(ClusterState state, Request request);
|
||||
|
||||
protected boolean retryOnFailure(Throwable e) {
|
||||
return false;
|
||||
@ -141,11 +139,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
|
||||
}
|
||||
}
|
||||
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
|
||||
// check if we need to execute, and if not, return
|
||||
if (!resolveRequest(observer.observedState(), request, listener)) {
|
||||
listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("[{}][{}] request {} could not be resolved",request.index, request.shardId, actionName)));
|
||||
return;
|
||||
}
|
||||
resolveRequest(observer.observedState(), request);
|
||||
blockException = checkRequestBlock(observer.observedState(), request);
|
||||
if (blockException != null) {
|
||||
if (blockException.retryable()) {
|
||||
@ -178,7 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
|
||||
return;
|
||||
}
|
||||
|
||||
request.shardId = shardIt.shardId().id();
|
||||
request.shardId = shardIt.shardId();
|
||||
DiscoveryNode node = nodes.get(shard.currentNodeId());
|
||||
transportService.sendRequest(node, shardActionName, request, transportOptions(), new BaseTransportResponseHandler<Response>() {
|
||||
|
||||
|
@ -35,7 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardsIterator;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.logging.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -27,9 +27,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A base class for task requests
|
||||
*/
|
||||
@ -47,33 +50,28 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
|
||||
private String[] actions = ALL_ACTIONS;
|
||||
|
||||
private String parentNode;
|
||||
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
|
||||
|
||||
private long parentTaskId = ALL_TASKS;
|
||||
|
||||
private long taskId = ALL_TASKS;
|
||||
private TaskId taskId = TaskId.EMPTY_TASK_ID;
|
||||
|
||||
public BaseTasksRequest() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get information about tasks from nodes based on the nodes ids specified.
|
||||
* If none are passed, information for all nodes will be returned.
|
||||
*/
|
||||
public BaseTasksRequest(String... nodesIds) {
|
||||
this.nodesIds = nodesIds;
|
||||
ActionRequestValidationException validationException = null;
|
||||
if (taskId.isSet() == false && nodesIds.length > 0) {
|
||||
validationException = addValidationError("task id cannot be used together with node ids",
|
||||
validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
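// The validation above follows the usual accumulating pattern: each rule appends a message and the
// request is rejected only if at least one rule fired. Restated with illustrative names, where
// taskIdGiven stands for "a concrete task id was supplied on the request":
final class TasksRequestValidationSketch {
    static java.util.List<String> validate(boolean taskIdGiven, int nodesIdsLength) {
        java.util.List<String> errors = new java.util.ArrayList<>();
        if (taskIdGiven && nodesIdsLength > 0) {
            errors.add("task id cannot be used together with node ids");
        }
        return errors;                    // an empty list means the request is valid
    }
}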
|
||||
|
||||
/**
|
||||
* Sets the list of action masks for the actions that should be returned
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public final Request actions(String... actions) {
|
||||
public final Request setActions(String... actions) {
|
||||
this.actions = actions;
|
||||
return (Request) this;
|
||||
}
|
||||
@ -81,16 +79,16 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
/**
|
||||
* Return the list of action masks for the actions that should be returned
|
||||
*/
|
||||
public String[] actions() {
|
||||
public String[] getActions() {
|
||||
return actions;
|
||||
}
|
||||
|
||||
public final String[] nodesIds() {
|
||||
public final String[] getNodesIds() {
|
||||
return nodesIds;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public final Request nodesIds(String... nodesIds) {
|
||||
public final Request setNodesIds(String... nodesIds) {
|
||||
this.nodesIds = nodesIds;
|
||||
return (Request) this;
|
||||
}
|
||||
@ -100,56 +98,43 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
*
|
||||
* By default tasks with any ids are returned.
|
||||
*/
|
||||
public long taskId() {
|
||||
public TaskId getTaskId() {
|
||||
return taskId;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public final Request taskId(long taskId) {
|
||||
public final Request setTaskId(TaskId taskId) {
|
||||
this.taskId = taskId;
|
||||
return (Request) this;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns the parent node id that tasks should be filtered by
|
||||
*/
|
||||
public String parentNode() {
|
||||
return parentNode;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public Request parentNode(String parentNode) {
|
||||
this.parentNode = parentNode;
|
||||
return (Request) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the parent task id that tasks should be filtered by
|
||||
*/
|
||||
public long parentTaskId() {
|
||||
public TaskId getParentTaskId() {
|
||||
return parentTaskId;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public Request parentTaskId(long parentTaskId) {
|
||||
public Request setParentTaskId(TaskId parentTaskId) {
|
||||
this.parentTaskId = parentTaskId;
|
||||
return (Request) this;
|
||||
}
|
||||
|
||||
|
||||
public TimeValue timeout() {
|
||||
public TimeValue getTimeout() {
|
||||
return this.timeout;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public final Request timeout(TimeValue timeout) {
|
||||
public final Request setTimeout(TimeValue timeout) {
|
||||
this.timeout = timeout;
|
||||
return (Request) this;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public final Request timeout(String timeout) {
|
||||
public final Request setTimeout(String timeout) {
|
||||
this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
|
||||
return (Request) this;
|
||||
}
|
||||
@ -157,11 +142,10 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
taskId = new TaskId(in);
|
||||
parentTaskId = new TaskId(in);
|
||||
nodesIds = in.readStringArray();
|
||||
taskId = in.readLong();
|
||||
actions = in.readStringArray();
|
||||
parentNode = in.readOptionalString();
|
||||
parentTaskId = in.readLong();
|
||||
if (in.readBoolean()) {
|
||||
timeout = TimeValue.readTimeValue(in);
|
||||
}
|
||||
@ -170,30 +154,24 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
taskId.writeTo(out);
|
||||
parentTaskId.writeTo(out);
|
||||
out.writeStringArrayNullable(nodesIds);
|
||||
out.writeLong(taskId);
|
||||
out.writeStringArrayNullable(actions);
|
||||
out.writeOptionalString(parentNode);
|
||||
out.writeLong(parentTaskId);
|
||||
out.writeOptionalStreamable(timeout);
|
||||
}
|
||||
|
||||
public boolean match(Task task) {
|
||||
if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
|
||||
if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
|
||||
return false;
|
||||
}
|
||||
if (taskId() != ALL_TASKS) {
|
||||
if(taskId() != task.getId()) {
|
||||
if (getTaskId().isSet() == false) {
|
||||
if(getTaskId().getId() != task.getId()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (parentNode() != null) {
|
||||
if (parentNode().equals(task.getParentNode()) == false) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (parentTaskId() != ALL_TASKS) {
|
||||
if (parentTaskId() != task.getParentId()) {
|
||||
if (parentTaskId.isSet() == false) {
|
||||
if (parentTaskId.equals(task.getParentTaskId()) == false) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -35,19 +35,19 @@ public class TasksRequestBuilder <Request extends BaseTasksRequest<Request>, Res

@SuppressWarnings("unchecked")
public final RequestBuilder setNodesIds(String... nodesIds) {
    request.nodesIds(nodesIds);
    request.setNodesIds(nodesIds);
    return (RequestBuilder) this;
}

@SuppressWarnings("unchecked")
public final RequestBuilder setActions(String... actions) {
    request.actions(actions);
    request.setActions(actions);
    return (RequestBuilder) this;
}

@SuppressWarnings("unchecked")
public final RequestBuilder setTimeout(TimeValue timeout) {
    request.timeout(timeout);
    request.setTimeout(timeout);
    return (RequestBuilder) this;
}
}
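The builder stays a thin delegate; only the request-side method names change. A short sketch of a chained call on some concrete TasksRequestBuilder subclass (the builder instance and action pattern are assumptions; wildcards work because match() goes through Regex.simpleMatch):

// Sketch: chaining the delegating setters on an assumed concrete builder.
builder.setNodesIds("node_1", "node_2")
       .setActions("cluster:monitor/*")
       .setTimeout(TimeValue.timeValueSeconds(30));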
@ -124,32 +124,37 @@ public abstract class TransportTasksAction<
}

protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
    return clusterState.nodes().resolveNodesIds(request.nodesIds());
    if (request.getTaskId().isSet()) {
        return clusterState.nodes().resolveNodesIds(request.getNodesIds());
    } else {
        return new String[]{request.getTaskId().getNodeId()};
    }
}

protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
    if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
    if (request.getTaskId().isSet() == false) {
        // we are only checking one task, we can optimize it
        Task task = taskManager.getTask(request.taskId());
        Task task = taskManager.getTask(request.getTaskId().getId());
        if (task != null) {
            if (request.match(task)) {
                operation.accept((OperationTask) task);
            } else {
                throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
                throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId());
            }
        } else {
            throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
            throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId());
        }
    } else {
        for (Task task : taskManager.getTasks().values()) {
            if (request.match(task)) {
                operation.accept((OperationTask)task);
                operation.accept((OperationTask) task);
            }
        }
    }
}

protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure>
    taskOperationFailures, List<FailedNodeException> failedNodeExceptions);

@SuppressWarnings("unchecked")
protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) {
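Two behaviours follow from the TaskId change here: resolveNodes can avoid fanning out to every node when the request pins down a single task's node, and processTasks either fetches that one task directly from the TaskManager or scans all registered tasks through match(). A minimal sketch of how an action built on this base class could collect the matching tasks (the surrounding list is an assumption for illustration):

// Sketch: gathering matched tasks via the Consumer-based callback shown above.
List<OperationTask> matched = new ArrayList<>();
processTasks(request, matched::add);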
@ -219,8 +224,8 @@ public abstract class TransportTasksAction<
    }
} else {
    TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
    if (request.timeout() != null) {
        builder.withTimeout(request.timeout());
    if (request.getTimeout() != null) {
        builder.withTimeout(request.getTimeout());
    }
    builder.withCompress(transportCompress());
    for (int i = 0; i < nodesIds.length; i++) {
@ -230,36 +235,32 @@ public abstract class TransportTasksAction<
try {
    if (node == null) {
        onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
    } else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
        // the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
        // we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
        // those (and they randomize the client node usage, so tricky to find when)
        onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
    } else {
        NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
        nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
        taskManager.registerChildTask(task, node.getId());
        transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
            @Override
            public NodeTasksResponse newInstance() {
                return new NodeTasksResponse();
            }
        transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
            new BaseTransportResponseHandler<NodeTasksResponse>() {
                @Override
                public NodeTasksResponse newInstance() {
                    return new NodeTasksResponse();
                }

            @Override
            public void handleResponse(NodeTasksResponse response) {
                onOperation(idx, response);
            }
                @Override
                public void handleResponse(NodeTasksResponse response) {
                    onOperation(idx, response);
                }

            @Override
            public void handleException(TransportException exp) {
                onFailure(idx, node.id(), exp);
            }
                @Override
                public void handleException(TransportException exp) {
                    onFailure(idx, node.id(), exp);
                }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }
        });
                @Override
                public String executor() {
                    return ThreadPool.Names.SAME;
                }
            });
    }
} catch (Throwable t) {
    onFailure(idx, nodeId, t);
@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc

@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.id());
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            IndexService indexService = indicesService.indexServiceSafe(request.index());
            IndexShard indexShard = indexService.getShard(shardId.id());
            TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
            termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
            response.add(request.locations.get(i), termVectorsResponse);
@ -38,6 +38,7 @@ import org.elasticsearch.action.support.single.instance.TransportInstanceSingleO
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -50,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
@ -99,13 +101,16 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
}

@Override
protected boolean resolveRequest(ClusterState state, UpdateRequest request, ActionListener<UpdateResponse> listener) {
    request.routing((state.metaData().resolveIndexRouting(request.parent(), request.routing(), request.index())));
protected void resolveRequest(ClusterState state, UpdateRequest request) {
    resolveAndValidateRouting(state.metaData(), request.concreteIndex(), request);
}

public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) {
    request.routing((metaData.resolveIndexRouting(request.parent(), request.routing(), request.index())));
    // Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
    if (request.routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.type())) {
        throw new RoutingMissingException(request.concreteIndex(), request.type(), request.id());
    if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) {
        throw new RoutingMissingException(concreteIndex, request.type(), request.id());
    }
    return true;
}

@Override
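Extracting resolveAndValidateRouting into a public static helper lets other code paths apply the same fail-fast routing check without going through the transport action itself. A hedged usage sketch (the caller and the variables in scope are assumptions):

// Sketch: reusing the static routing validation from another code path.
TransportUpdateAction.resolveAndValidateRouting(clusterState.metaData(), concreteIndex, updateRequest);
// throws RoutingMissingException here, instead of later when translating to an index/delete request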
@ -143,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio

@Override
protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) {
    if (request.shardId() != -1) {
        return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt();
    if (request.getShardId() != null) {
        return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt();
    }
    ShardIterator shardIterator = clusterService.operationRouting()
        .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing());
@ -163,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
}

protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
    final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
    final IndexShard indexShard = indexService.getShard(request.shardId());
    final ShardId shardId = request.getShardId();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.getId());
    final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
    switch (result.operation()) {
        case UPSERT:
@ -191,7 +197,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
if (e instanceof VersionConflictEngineException) {
    if (retryCount < request.retryOnConflict()) {
        logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]",
            retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id());
            retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id());
        threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
            @Override
            protected void doRun() {
@ -266,9 +272,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
    break;
case NONE:
    UpdateResponse update = result.action();
    IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
    if (indexServiceOrNull != null) {
        IndexShard shard = indexService.getShardOrNull(request.shardId());
    IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
    if (indexServiceOrNull != null) {
        IndexShard shard = indexService.getShardOrNull(shardId.getId());
        if (shard != null) {
            shard.noopUpdate(request.type());
        }
@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
}

public UpdateRequest(String index, String type, String id) {
    this.index = index;
    super(index);
    this.type = type;
    this.id = id;
}
@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
    return parent;
}

int shardId() {
public ShardId getShardId() {
    return this.shardId;
}
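Exposing the target shard as a ShardId instead of a package-private int means "not resolved yet" is a null rather than a -1 sentinel, and the value carries its index alongside the shard number. A small hedged sketch of consuming it (variables assumed to be in scope):

// Sketch: reading the ShardId-based accessor.
ShardId shardId = updateRequest.getShardId();
if (shardId != null) {
    int shard = shardId.getId();  // shard number within the index
    // shardId.getIndex() identifies the owning index, as used with indexServiceSafe(...) above
}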
Some files were not shown because too many files have changed in this diff.