commit 024ed1b6ca
Merge branch 'master' into feature/rank-eval

@@ -117,7 +117,7 @@ For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to

Please follow these formatting guidelines:

* Java indent is 4 spaces
* Line width is 140 characters
* Line width is 100 characters
* The rest is left to Java coding standards
* Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
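For illustration (not part of this commit), a hypothetical import section showing what the wildcard rule above forbids and what it expects instead:

    // forbidden: wildcard import, fails the build
    // import org.elasticsearch.common.settings.*;

    // allowed: explicit imports only
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;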
build.gradle

@@ -18,15 +18,17 @@
*/

import java.nio.file.Path
import java.util.regex.Matcher
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.lib.RepositoryBuilder
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.VersionProperties

// common maven publishing configuration
subprojects {
group = 'org.elasticsearch'
version = org.elasticsearch.gradle.VersionProperties.elasticsearch
version = VersionProperties.elasticsearch
description = "Elasticsearch subproject ${project.path}"
}

@@ -59,12 +61,26 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) {
}
}

int prevMajor = Integer.parseInt(VersionProperties.elasticsearch.split('\\.')[0]) - 1
String prevSnapshot = VersionProperties.elasticsearch.contains('alpha') ? '-SNAPSHOT' : ''
File versionFile = file('core/src/main/java/org/elasticsearch/Version.java')
List<String> versionLines = versionFile.readLines('UTF-8')
int prevMinor = 0
for (String line : versionLines) {
Matcher match = line =~ /\W+public static final Version V_${prevMajor}_(\d+)_.*/
if (match.matches()) {
prevMinor = Math.max(Integer.parseInt(match.group(1)), prevMinor)
}
}

// injecting groovy property variables into all projects
allprojects {
// injecting groovy property variables into all projects
project.ext {
// for ide hacks...
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
// for backcompat testing
bwcVersion = "${prevMajor}.${prevMinor}.0${prevSnapshot}"
}
}

@@ -112,6 +128,7 @@ subprojects {
"org.elasticsearch.client:transport:${version}": ':client:transport',
"org.elasticsearch.test:framework:${version}": ':test:framework',
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
"org.elasticsearch.distribution.zip:elasticsearch:${bwcVersion}": ':distribution:bwc-zip',
"org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',

@@ -123,10 +140,12 @@ subprojects {
"org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
projectSubstitutions.each { k,v ->
subs.substitute(subs.module(k)).with(subs.project(v))
project.afterEvaluate {
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
projectSubstitutions.each { k,v ->
subs.substitute(subs.module(k)).with(subs.project(v))
}
}
}
}

@@ -328,46 +328,15 @@ class BuildPlugin implements Plugin<Project> {
return
}

// check each dependency for any transitive deps
// fix deps incorrectly marked as runtime back to compile time deps
// see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4
for (Node depNode : depsNodes.get(0).children()) {
String groupId = depNode.get('groupId').get(0).text()
String artifactId = depNode.get('artifactId').get(0).text()
String version = depNode.get('version').get(0).text()

// fix deps incorrectly marked as runtime back to compile time deps
// see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4
boolean isCompileDep = project.configurations.compile.allDependencies.find { dep ->
dep.name == depNode.artifactId.text()
}
if (depNode.scope.text() == 'runtime' && isCompileDep) {
depNode.scope*.value = 'compile'
}

// collect the transitive deps now that we know what this dependency is
String depConfig = transitiveDepConfigName(groupId, artifactId, version)
Configuration configuration = project.configurations.findByName(depConfig)
if (configuration == null) {
continue // we did not make this dep non-transitive
}
Set<ResolvedArtifact> artifacts = configuration.resolvedConfiguration.resolvedArtifacts
if (artifacts.size() <= 1) {
// this dep has no transitive deps (or the only artifact is itself)
continue
}

// we now know we have something to exclude, so add exclusions for all artifacts except the main one
Node exclusions = depNode.appendNode('exclusions')
for (ResolvedArtifact artifact : artifacts) {
ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id;
String depGroupId = moduleVersionIdentifier.group
String depArtifactId = moduleVersionIdentifier.name
// add exclusions for all artifacts except the main one
if (depGroupId != groupId || depArtifactId != artifactId) {
Node exclusion = exclusions.appendNode('exclusion')
exclusion.appendNode('groupId', depGroupId)
exclusion.appendNode('artifactId', depArtifactId)
}
}
}
}
}

@@ -39,6 +39,9 @@ class PluginPropertiesExtension {
@Input
String classname

@Input
boolean hasNativeController = false

/** Indicates whether the plugin jar should be made available for the transport client. */
@Input
boolean hasClientJar = false

@@ -79,7 +79,8 @@ class PluginPropertiesTask extends Copy {
'version': stringSnap(extension.version),
'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
'javaVersion': project.targetCompatibility as String,
'classname': extension.classname
'classname': extension.classname,
'hasNativeController': extension.hasNativeController
]
}
}

@@ -217,7 +217,7 @@ class VagrantTestPlugin implements Plugin<Project> {
// Now we iterate over dependencies of the bats configuration. When a project dependency is found,
// we bring back its own archives, test files or test utils.
project.afterEvaluate {
project.configurations.bats.dependencies.findAll {it.configuration == BATS }.each { d ->
project.configurations.bats.dependencies.findAll {it.targetConfiguration == BATS }.each { d ->
if (d instanceof DefaultProjectDependency) {
DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
Project externalBatsProject = externalBatsDependency.dependencyProject

@@ -605,7 +605,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JavaVersion.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Natives.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Spawner.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]StartupException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]SystemCallFilter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cli[/\\]Command.java" checks="LineLength" />

@@ -1565,14 +1564,12 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]AnalysisPlugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]ClusterPlugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DiscoveryPlugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]IngestPlugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]InstallPluginCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]ListPluginsCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]MetaDataUpgrader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]NetworkPlugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]Plugin.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginSecurity.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]ProgressInputStream.java" checks="LineLength" />

@@ -2307,7 +2304,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]RetryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]TransportBulkActionIngestTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]TransportBulkActionTookTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]TransportShardBulkActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]byscroll[/\\]AsyncBulkByScrollActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]byscroll[/\\]BulkByScrollParallelizationHelperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]bulk[/\\]byscroll[/\\]BulkByScrollResponseTests.java" checks="LineLength" />

@@ -2388,7 +2384,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANativesTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JarHellTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]MaxMapCountCheckTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]SpawnerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]broadcast[/\\]BroadcastActionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]OldIndexBackwardsCompatibilityIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]RecoveryWithUnsupportedIndicesIT.java" checks="LineLength" />

@@ -3009,7 +3004,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]SimpleNodesInfoIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]operateAllIndices[/\\]DestructiveOperationsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]RecoveriesCollectionTests.java" checks="LineLength" />

@@ -3949,13 +3943,9 @@
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]EvilLoggerTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]NodeEnvironmentEvilTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]InstallPluginCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]ListPluginsCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginSecurityTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]no-bootstrap-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]SpawnerNoBootstrapTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]ESSmokeClientTestCase.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]SmokeTestClientIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-http[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]ContextAndHeaderTransportIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-http[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]CorsRegexIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-http[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]DeprecationHttpIT.java" checks="LineLength" />

@@ -44,4 +44,13 @@ java.net.URLConnection#getInputStream()
java.net.Socket#connect(java.net.SocketAddress)
java.net.Socket#connect(java.net.SocketAddress, int)
java.nio.channels.SocketChannel#open(java.net.SocketAddress)
java.nio.channels.SocketChannel#connect(java.net.SocketAddress)
java.nio.channels.SocketChannel#connect(java.net.SocketAddress)

# This method is misleading, and uses lenient boolean parsing under the hood. If you intend to parse
# a system property as a boolean, use
# org.elasticsearch.common.Booleans#parseBoolean(java.lang.String) on the result of
# java.lang.SystemProperty#getProperty(java.lang.String) instead. If you were not intending to parse
# a system property as a boolean, but instead parse a string to a boolean, use
# org.elasticsearch.common.Booleans#parseBoolean(java.lang.String) directly on the string.
@defaultMessage use org.elasticsearch.common.Booleans#parseBoolean(java.lang.String)
java.lang.Boolean#getBoolean(java.lang.String)

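For illustration (not part of this commit), a minimal sketch of the pitfall the banned signature above guards against; the property name is hypothetical. Boolean.getBoolean(String) silently reads a system property and returns false for anything that is not exactly "true", while the suggested replacement reads the property explicitly and parses it strictly:

    // lenient and misleading: false if the property is unset, misspelled,
    // or set to "yes" or "1"; it never fails loudly
    boolean lenient = Boolean.getBoolean("es.example.flag");

    // preferred: fetch the property, then parse the string strictly
    String raw = System.getProperty("es.example.flag", "false");
    boolean strict = org.elasticsearch.common.Booleans.parseBoolean(raw);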
@@ -30,11 +30,15 @@ name=${name}
# 'classname': the name of the class to load, fully-qualified.
classname=${classname}
#
# 'java.version' version of java the code is built against
# 'java.version': version of java the code is built against
# use the system property java.specification.version
# version string must be a sequence of nonnegative decimal integers
# separated by "."'s and may have leading zeros
java.version=${javaVersion}
#
# 'elasticsearch.version' version of elasticsearch compiled against
# 'elasticsearch.version': version of elasticsearch compiled against
elasticsearch.version=${elasticsearchVersion}
### optional elements for plugins:
#
# 'has.native.controller': whether or not the plugin has a native controller
has.native.controller=${hasNativeController}

@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
lucene = 6.5.0-snapshot-d00c5ca
lucene = 6.5.0

# optional dependencies
spatial4j = 0.6

@@ -79,9 +79,9 @@ public class RequestTests extends ESTestCase {
}

public void testDelete() throws IOException {
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String id = randomAsciiOfLengthBetween(3, 10);
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
DeleteRequest deleteRequest = new DeleteRequest(index, type, id);

Map<String, String> expectedParams = new HashMap<>();

@@ -93,12 +93,12 @@ public class RequestTests extends ESTestCase {

if (frequently()) {
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
String routing = randomAlphaOfLengthBetween(3, 10);
deleteRequest.routing(routing);
expectedParams.put("routing", routing);
}
if (randomBoolean()) {
String parent = randomAsciiOfLengthBetween(3, 10);
String parent = randomAlphaOfLengthBetween(3, 10);
deleteRequest.parent(parent);
expectedParams.put("parent", parent);
}

@@ -116,20 +116,20 @@ public class RequestTests extends ESTestCase {
}

private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String id = randomAsciiOfLengthBetween(3, 10);
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
GetRequest getRequest = new GetRequest(index, type, id);

Map<String, String> expectedParams = new HashMap<>();
if (randomBoolean()) {
if (randomBoolean()) {
String preference = randomAsciiOfLengthBetween(3, 10);
String preference = randomAlphaOfLengthBetween(3, 10);
getRequest.preference(preference);
expectedParams.put("preference", preference);
}
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
String routing = randomAlphaOfLengthBetween(3, 10);
getRequest.routing(routing);
expectedParams.put("routing", routing);
}

@@ -166,7 +166,7 @@ public class RequestTests extends ESTestCase {
String[] storedFields = new String[numStoredFields];
StringBuilder storedFieldsParam = new StringBuilder();
for (int i = 0; i < numStoredFields; i++) {
String storedField = randomAsciiOfLengthBetween(3, 10);
String storedField = randomAlphaOfLengthBetween(3, 10);
storedFields[i] = storedField;
storedFieldsParam.append(storedField);
if (i < numStoredFields - 1) {

@@ -188,11 +188,11 @@ public class RequestTests extends ESTestCase {
}

public void testIndex() throws IOException {
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
IndexRequest indexRequest = new IndexRequest(index, type);

String id = randomBoolean() ? randomAsciiOfLengthBetween(3, 10) : null;
String id = randomBoolean() ? randomAlphaOfLengthBetween(3, 10) : null;
indexRequest.id(id);

Map<String, String> expectedParams = new HashMap<>();

@@ -219,17 +219,17 @@ public class RequestTests extends ESTestCase {

if (frequently()) {
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
String routing = randomAlphaOfLengthBetween(3, 10);
indexRequest.routing(routing);
expectedParams.put("routing", routing);
}
if (randomBoolean()) {
String parent = randomAsciiOfLengthBetween(3, 10);
String parent = randomAlphaOfLengthBetween(3, 10);
indexRequest.parent(parent);
expectedParams.put("parent", parent);
}
if (randomBoolean()) {
String pipeline = randomAsciiOfLengthBetween(3, 10);
String pipeline = randomAlphaOfLengthBetween(3, 10);
indexRequest.setPipeline(pipeline);
expectedParams.put("pipeline", pipeline);
}

@@ -270,9 +270,9 @@ public class RequestTests extends ESTestCase {
XContentType xContentType = randomFrom(XContentType.values());

Map<String, String> expectedParams = new HashMap<>();
String index = randomAsciiOfLengthBetween(3, 10);
String type = randomAsciiOfLengthBetween(3, 10);
String id = randomAsciiOfLengthBetween(3, 10);
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);

UpdateRequest updateRequest = new UpdateRequest(index, type, id);
updateRequest.detectNoop(randomBoolean());

@@ -295,12 +295,12 @@ public class RequestTests extends ESTestCase {
updateRequest.upsert(new IndexRequest().source(source, xContentType));
}
if (randomBoolean()) {
String routing = randomAsciiOfLengthBetween(3, 10);
String routing = randomAlphaOfLengthBetween(3, 10);
updateRequest.routing(routing);
expectedParams.put("routing", routing);
}
if (randomBoolean()) {
String parent = randomAsciiOfLengthBetween(3, 10);
String parent = randomAlphaOfLengthBetween(3, 10);
updateRequest.parent(parent);
expectedParams.put("parent", parent);
}

@@ -416,9 +416,9 @@ public class RequestTests extends ESTestCase {

int nbItems = randomIntBetween(10, 100);
for (int i = 0; i < nbItems; i++) {
String index = randomAsciiOfLength(5);
String type = randomAsciiOfLength(5);
String id = randomAsciiOfLength(5);
String index = randomAlphaOfLength(5);
String type = randomAlphaOfLength(5);
String id = randomAlphaOfLength(5);

BytesReference source = RandomObjects.randomSource(random(), xContentType);
DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());

@@ -428,16 +428,16 @@ public class RequestTests extends ESTestCase {
IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType);
docWriteRequest = indexRequest;
if (randomBoolean()) {
indexRequest.setPipeline(randomAsciiOfLength(5));
indexRequest.setPipeline(randomAlphaOfLength(5));
}
if (randomBoolean()) {
indexRequest.parent(randomAsciiOfLength(5));
indexRequest.parent(randomAlphaOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.CREATE) {
IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true);
docWriteRequest = createRequest;
if (randomBoolean()) {
createRequest.parent(randomAsciiOfLength(5));
createRequest.parent(randomAlphaOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.UPDATE) {
final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType));

@@ -449,14 +449,14 @@ public class RequestTests extends ESTestCase {
randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>());
}
if (randomBoolean()) {
updateRequest.parent(randomAsciiOfLength(5));
updateRequest.parent(randomAlphaOfLength(5));
}
} else if (opType == DocWriteRequest.OpType.DELETE) {
docWriteRequest = new DeleteRequest(index, type, id);
}

if (randomBoolean()) {
docWriteRequest.routing(randomAsciiOfLength(10));
docWriteRequest.routing(randomAlphaOfLength(10));
}
if (randomBoolean()) {
docWriteRequest.version(randomNonNegativeLong());

@@ -591,7 +591,7 @@ public class RequestTests extends ESTestCase {
Map<String, String> expectedParams = new HashMap<>();
for (int i = 0; i < nbParams; i++) {
String paramName = "p_" + i;
String paramValue = randomAsciiOfLength(5);
String paramValue = randomAlphaOfLength(5);
params.putParam(paramName, paramValue);
expectedParams.put(paramName, paramValue);
}

@@ -665,7 +665,7 @@ public class RequestTests extends ESTestCase {
String[] includes = new String[numIncludes];
StringBuilder includesParam = new StringBuilder();
for (int i = 0; i < numIncludes; i++) {
String include = randomAsciiOfLengthBetween(3, 10);
String include = randomAlphaOfLengthBetween(3, 10);
includes[i] = include;
includesParam.append(include);
if (i < numIncludes - 1) {

@@ -679,7 +679,7 @@ public class RequestTests extends ESTestCase {
String[] excludes = new String[numExcludes];
StringBuilder excludesParam = new StringBuilder();
for (int i = 0; i < numExcludes; i++) {
String exclude = randomAsciiOfLengthBetween(3, 10);
String exclude = randomAlphaOfLengthBetween(3, 10);
excludes[i] = exclude;
excludesParam.append(exclude);
if (i < numExcludes - 1) {

@@ -1 +0,0 @@
9ad2a7bd252cbdb76ac121287e670d75f4db2cd3
@@ -0,0 +1 @@
3989779b05ecd0ace6affe19223b1c27156604f1

@@ -1 +0,0 @@
c6a940eff8a87df40262b752ed7b135e448b7873
@@ -0,0 +1 @@
6a8660e7133f357ef40d9cac26316ccd9937a2eb

@@ -1 +0,0 @@
6ef5ad88141760c00ea041da1535f3ffc364d67d
@@ -0,0 +1 @@
ff176c9bde4228b43827849f5d2ff2e2717e3297

@@ -1 +0,0 @@
f15775571fb5762dfc92e00c3909cb8db8ff1d53
@@ -0,0 +1 @@
10d2e5b36f460527ac9b948be0ec3077bde5b0ca

@@ -1 +0,0 @@
051d793aa64257beead4ccc7432eb5df81d17f23
@@ -0,0 +1 @@
0019bb6a631ea0123e8e553b0510fa81c9d3c3eb

@@ -1 +0,0 @@
5bc4cba55670c14ea812ff5de65edad4c312fdf6
@@ -0,0 +1 @@
dad85baba266793b9ceb80a9b08c4ee9838e09df

@@ -1 +0,0 @@
68cf08bcd8414a57493debf3a6a509d78a9abb56
@@ -0,0 +1 @@
938f9f7efe8a403fd57c99aedd75d040d9caa896

@@ -1 +0,0 @@
f5d90756dbeda1218d723b7bea0799c88d621adb
@@ -0,0 +1 @@
afdff39ecb30f6e2c6f056a5bdfcb13d928a25af

@@ -1 +0,0 @@
9298e7d1ed96e7beb63d7ccdce1a4502eb0fe484
@@ -0,0 +1 @@
8e3971a008070712d57b59cf1f7b44c0d9d3df25

@@ -1 +0,0 @@
918de18963607af69dff38e4773c0bde89c73ae3
@@ -0,0 +1 @@
225b904edf91ccdffffa398e1924ebadd5677c09

@@ -1 +0,0 @@
a311a7d9f3e9a8fbf3a367a4e2731f9d4579732b
@@ -0,0 +1 @@
5c994fc5dc4f37133a861571211303d81c5d51ff

@@ -1 +0,0 @@
693bc4cb0e2e4465e0173c67ed0818071c4b460b
@@ -0,0 +1 @@
553b7b13bef994f14076a85557df03cad67322e9

@@ -1 +0,0 @@
0326f31e63c76d476c23488c7354265cf915350f
@@ -0,0 +1 @@
73deae791d861820974600705ba06e9f801cbe56

@@ -1 +0,0 @@
69a3a86e9d045f872408793ea411d49e0c577268
@@ -0,0 +1 @@
c2aad69500dac79338ef45f570cab47bec3d2724

@@ -1 +0,0 @@
fabc05ca175150171cf60370877276b933716bcd
@@ -0,0 +1 @@
acf211f2bf901dfc8155a46c5a42c5650edf74ef

@@ -19,6 +19,8 @@

package org.apache.lucene.index;

import java.io.IOException;

/**
* Allows pkg private access
*/

@@ -27,4 +29,33 @@ public class OneMergeHelper {
public static String getSegmentName(MergePolicy.OneMerge merge) {
return merge.info != null ? merge.info.info.name : "_na_";
}

/**
* The current MB per second rate limit for this merge.
**/
public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) {
if (thread instanceof ConcurrentMergeScheduler.MergeThread) {
return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getMBPerSec();
}
assert false: "this is not merge thread";
return Double.POSITIVE_INFINITY;
}

/**
* Returns total bytes written by this merge.
**/
public static long getTotalBytesWritten(Thread thread,
MergePolicy.OneMerge merge) throws IOException {
/**
* TODO: The number of bytes written during the merge should be accessible in OneMerge.
*/
if (thread instanceof ConcurrentMergeScheduler.MergeThread) {
return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter
.getTotalBytesWritten();
}
assert false: "this is not merge thread";
return merge.totalBytesSize();
}

}

@@ -26,7 +26,6 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.util.PriorityQueue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

@@ -35,7 +34,7 @@ import java.util.Set;
/**
* Represents hits returned by {@link CollapsingTopDocsCollector#getTopDocs()}.
*/
public class CollapseTopFieldDocs extends TopFieldDocs {
public final class CollapseTopFieldDocs extends TopFieldDocs {
/** The field used for collapsing **/
public final String field;
/** The collapse value for each top doc */

@@ -49,22 +48,59 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
}

// Refers to one hit:
private static class ShardRef {
private static final class ShardRef {
// Which shard (index into shardHits[]):
final int shardIndex;

// True if we should use the incoming ScoreDoc.shardIndex for sort order
final boolean useScoreDocIndex;

// Which hit within the shard:
int hitIndex;

ShardRef(int shardIndex) {
ShardRef(int shardIndex, boolean useScoreDocIndex) {
this.shardIndex = shardIndex;
this.useScoreDocIndex = useScoreDocIndex;
}

@Override
public String toString() {
return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")";
}
};

int getShardIndex(ScoreDoc scoreDoc) {
if (useScoreDocIndex) {
if (scoreDoc.shardIndex == -1) {
throw new IllegalArgumentException("setShardIndex is false but TopDocs["
+ shardIndex + "].scoreDocs[" + hitIndex + "] is not set");
}
return scoreDoc.shardIndex;
} else {
// NOTE: we don't assert that shardIndex is -1 here, because caller could in fact have set it but asked us to ignore it now
return shardIndex;
}
}
}

/**
* if we need to tie-break since score / sort value are the same we first compare shard index (lower shard wins)
* and then iff shard index is the same we use the hit index.
*/
static boolean tieBreakLessThan(ShardRef first, ScoreDoc firstDoc, ShardRef second, ScoreDoc secondDoc) {
final int firstShardIndex = first.getShardIndex(firstDoc);
final int secondShardIndex = second.getShardIndex(secondDoc);
// Tie break: earlier shard wins
if (firstShardIndex < secondShardIndex) {
return true;
} else if (firstShardIndex > secondShardIndex) {
return false;
} else {
// Tie break in same shard: resolve however the
// shard had resolved it:
assert first.hitIndex != second.hitIndex;
return first.hitIndex < second.hitIndex;
}
}

private static class MergeSortQueue extends PriorityQueue<ShardRef> {
// These are really FieldDoc instances:

@@ -72,7 +108,7 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
final FieldComparator<?>[] comparators;
final int[] reverseMul;

MergeSortQueue(Sort sort, CollapseTopFieldDocs[] shardHits) throws IOException {
MergeSortQueue(Sort sort, CollapseTopFieldDocs[] shardHits) {
super(shardHits.length);
this.shardHits = new ScoreDoc[shardHits.length][];
for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {

@@ -115,18 +151,7 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
return cmp < 0;
}
}

// Tie break: earlier shard wins
if (first.shardIndex < second.shardIndex) {
return true;
} else if (first.shardIndex > second.shardIndex) {
return false;
} else {
// Tie break in same shard: resolve however the
// shard had resolved it:
assert first.hitIndex != second.hitIndex;
return first.hitIndex < second.hitIndex;
}
return tieBreakLessThan(first, firstFD, second, secondFD);
}
}

@@ -135,7 +160,7 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
* the provided CollapseTopDocs, sorting by score. Each {@link CollapseTopFieldDocs} instance must be sorted.
**/
public static CollapseTopFieldDocs merge(Sort sort, int start, int size,
CollapseTopFieldDocs[] shardHits) throws IOException {
CollapseTopFieldDocs[] shardHits, boolean setShardIndex) {
String collapseField = shardHits[0].field;
for (int i = 1; i < shardHits.length; i++) {
if (collapseField.equals(shardHits[i].field) == false) {

@@ -155,7 +180,7 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
totalHitCount += shard.totalHits;
if (shard.scoreDocs != null && shard.scoreDocs.length > 0) {
availHitCount += shard.scoreDocs.length;
queue.add(new ShardRef(shardIDX));
queue.add(new ShardRef(shardIDX, setShardIndex == false));
maxScore = Math.max(maxScore, shard.getMaxScore());
}
}

@@ -192,7 +217,9 @@ public class CollapseTopFieldDocs extends TopFieldDocs {
continue;
}
seen.add(collapseValue);
hit.shardIndex = ref.shardIndex;
if (setShardIndex) {
hit.shardIndex = ref.shardIndex;
}
if (hitUpto >= start) {
hitList.add(hit);
collapseList.add(collapseValue);

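For illustration (not part of this commit), a sketch of how the new setShardIndex flag changes a call site of the merge method shown above; the variable names are assumptions:

    // shard indices were already stamped on the hits by the caller,
    // so ask merge() to leave ScoreDoc.shardIndex untouched
    CollapseTopFieldDocs merged =
        CollapseTopFieldDocs.merge(sort, from, size, perShardTopDocs, false);

    // passing true instead makes merge() set each hit's shardIndex to the
    // position of its shard in the perShardTopDocs array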
@@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -712,7 +713,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
* in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in
* ExceptionSerializationTests.testIds.ids.
*/
enum ElasticsearchExceptionHandle {
private enum ElasticsearchExceptionHandle {
INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0, UNKNOWN_VERSION_ADDED),
DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,

@@ -1006,6 +1007,30 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}
}

/**
* Returns an array of all registered handle IDs. These are the IDs for every registered
* exception.
*
* @return an array of all registered handle IDs
*/
static int[] ids() {
return Arrays.stream(ElasticsearchExceptionHandle.values()).mapToInt(h -> h.id).toArray();
}

/**
* Returns an array of all registered pairs of handle IDs and exception classes. These pairs are
* provided for every registered exception.
*
* @return an array of all registered pairs of handle IDs and exception classes
*/
static Tuple<Integer, Class<? extends ElasticsearchException>>[] classes() {
@SuppressWarnings("unchecked")
final Tuple<Integer, Class<? extends ElasticsearchException>>[] ts =
Arrays.stream(ElasticsearchExceptionHandle.values())
.map(h -> Tuple.tuple(h.id, h.exceptionClass)).toArray(Tuple[]::new);
return ts;
}

static {
ID_TO_SUPPLIER = unmodifiableMap(Arrays
.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)));

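For illustration (not part of this commit), a sketch of how the new package-private ids() and classes() helpers might be consumed by a same-package test; the assertions are assumptions about how such a test could use them:

    // every registered handle id should be unique
    int[] ids = ElasticsearchException.ids();
    assert ids.length == java.util.Arrays.stream(ids).distinct().count()
        : "duplicate exception ids registered";

    // each pair maps a wire id (v1) to the exception class it represents (v2)
    for (Tuple<Integer, Class<? extends ElasticsearchException>> handle : ElasticsearchException.classes()) {
        assert handle.v1() >= 0 && handle.v2() != null;
    }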
@@ -116,6 +116,8 @@ public class Version implements Comparable<Version> {
public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_3_0_ID_UNRELEASED = 5030099;
public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_3_1_ID_UNRELEASED = 5030199;
public static final Version V_5_3_1_UNRELEASED = new Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_4_0_ID_UNRELEASED = 5040099;
public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;

@@ -138,6 +140,10 @@ public class Version implements Comparable<Version> {
switch (id) {
case V_6_0_0_alpha1_ID_UNRELEASED:
return V_6_0_0_alpha1_UNRELEASED;
case V_5_4_0_ID_UNRELEASED:
return V_5_4_0_UNRELEASED;
case V_5_3_1_ID_UNRELEASED:
return V_5_3_1_UNRELEASED;
case V_5_3_0_ID_UNRELEASED:
return V_5_3_0_UNRELEASED;
case V_5_2_3_ID_UNRELEASED:

@@ -24,6 +24,7 @@ import org.elasticsearch.common.CheckedConsumer;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

/**

@@ -149,6 +149,9 @@ import org.elasticsearch.action.delete.DeleteAction;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.explain.ExplainAction;
import org.elasticsearch.action.explain.TransportExplainAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesIndexAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
import org.elasticsearch.action.fieldstats.TransportFieldStatsAction;
import org.elasticsearch.action.get.GetAction;

@@ -205,6 +208,7 @@ import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.RestFieldStatsAction;
import org.elasticsearch.rest.action.RestMainAction;
import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction;

@@ -479,6 +483,8 @@ public class ActionModule extends AbstractModule {
actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);

actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);
actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class,
TransportFieldCapabilitiesIndexAction.class);

actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);

@@ -587,6 +593,7 @@ public class ActionModule extends AbstractModule {
registerHandler.accept(new RestDeleteStoredScriptAction(settings, restController));

registerHandler.accept(new RestFieldStatsAction(settings, restController));
registerHandler.accept(new RestFieldCapabilitiesAction(settings, restController));

// Tasks API
registerHandler.accept(new RestListTasksAction(settings, restController, nodesInCluster));

@@ -0,0 +1,50 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action;

import java.util.concurrent.atomic.AtomicBoolean;

/**
* A listener that ensures that only one of onResponse or onFailure is called. And the method
* the is called is only called once. Subclasses should implement notification logic with
* innerOnResponse and innerOnFailure.
*/
public abstract class NotifyOnceListener<Response> implements ActionListener<Response> {

private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false);

protected abstract void innerOnResponse(Response response);

protected abstract void innerOnFailure(Exception e);

@Override
public final void onResponse(Response response) {
if (hasBeenCalled.compareAndSet(false, true)) {
innerOnResponse(response);
}
}

@Override
public final void onFailure(Exception e) {
if (hasBeenCalled.compareAndSet(false, true)) {
innerOnFailure(e);
}
}
}
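For illustration (not part of this commit), a minimal usage sketch of the NotifyOnceListener added above; the response type and surrounding code are hypothetical:

    ActionListener<SearchResponse> once = new NotifyOnceListener<SearchResponse>() {
        @Override
        protected void innerOnResponse(SearchResponse response) {
            // runs at most once, even if onResponse is invoked repeatedly
        }

        @Override
        protected void innerOnFailure(Exception e) {
            // runs at most once, and never after innerOnResponse has run
        }
    };
    // if both once.onResponse(...) and once.onFailure(...) are called,
    // only the first call is delivered to the subclass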
@@ -25,6 +25,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;

@@ -73,7 +74,8 @@ public class ClusterStatsNodes implements ToXContent {
this.plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());

// now do the stats that should be deduped by hardware (implemented by ip deduping)
TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
TransportAddress publishAddress =
nodeResponse.nodeInfo().getTransport().address().publishAddress();
final InetAddress inetAddress = publishAddress.address().getAddress();
if (!seenAddresses.add(inetAddress)) {
continue;

@@ -209,7 +211,8 @@ public class ClusterStatsNodes implements ToXContent {
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params)
throws IOException {
builder.field(Fields.TOTAL, total);
for (Map.Entry<String, Integer> entry : roles.entrySet()) {
builder.field(entry.getKey(), entry.getValue());

@@ -280,7 +283,8 @@ public class ClusterStatsNodes implements ToXContent {
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params)
throws IOException {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors);
builder.startArray(Fields.NAMES);

@@ -326,7 +330,8 @@ public class ClusterStatsNodes implements ToXContent {
// fd can be -1 if not supported on platform
totalOpenFileDescriptors += fd;
}
// we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes.
// we still do min max calc on -1, so we'll have an indication
// of it not being supported on one of the nodes.
minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
}

@@ -375,7 +380,8 @@ public class ClusterStatsNodes implements ToXContent {
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params)
throws IOException {
builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject();
if (count > 0) {
builder.startObject(Fields.OPEN_FILE_DESCRIPTORS);

@@ -479,7 +485,8 @@ public class ClusterStatsNodes implements ToXContent {
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params)
throws IOException {
builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
builder.startArray(Fields.VERSIONS);
for (ObjectIntCursor<JvmVersion> v : versions) {

@@ -540,17 +547,25 @@ public class ClusterStatsNodes implements ToXContent {
private final Map<String, AtomicInteger> transportTypes;
private final Map<String, AtomicInteger> httpTypes;

private NetworkTypes(final List<NodeInfo> nodeInfos) {
NetworkTypes(final List<NodeInfo> nodeInfos) {
final Map<String, AtomicInteger> transportTypes = new HashMap<>();
final Map<String, AtomicInteger> httpTypes = new HashMap<>();
for (final NodeInfo nodeInfo : nodeInfos) {
final Settings settings = nodeInfo.getSettings();
final String transportType =
settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
settings.get(NetworkModule.TRANSPORT_TYPE_KEY,
NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
final String httpType =
settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet();
httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet();
settings.get(NetworkModule.HTTP_TYPE_KEY,
NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
if (Strings.hasText(transportType)) {
transportTypes.computeIfAbsent(transportType,
k -> new AtomicInteger()).incrementAndGet();
}
if (Strings.hasText(httpType)) {
httpTypes.computeIfAbsent(httpType,
k -> new AtomicInteger()).incrementAndGet();
}
}
this.transportTypes = Collections.unmodifiableMap(transportTypes);
this.httpTypes = Collections.unmodifiableMap(httpTypes);

@@ -20,48 +20,28 @@
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

import java.util.Objects;

public interface MappingUpdatePerformer {
/**
* Determine if any mappings need to be updated, and update them on the
* master node if necessary. Returnes a failed {@code Engine.IndexResult}
* in the event updating the mappings fails or null if successful.
* Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the
* operation needs to be retried on the primary due to the mappings not
* being present yet, or a different exception if updating the mappings
* on the master failed.
*/
@Nullable
MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception;

/**
* Class encapsulating the resulting of potentially updating the mapping
* Determine if any mappings need to be updated, and update them on the master node if
* necessary. Returnes a failure Exception in the event updating the mappings fails or null if
* successful.
*/
class MappingUpdateResult {
@Nullable
public final Engine.Index operation;
@Nullable
public final Exception failure;
void updateMappingsIfNeeded(Engine.Index operation,
ShardId shardId,
String type) throws Exception;

MappingUpdateResult(Exception failure) {
Objects.requireNonNull(failure, "failure cannot be null");
this.failure = failure;
this.operation = null;
}
/**
* Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be
* retried on the primary due to the mappings not being present yet, or a different exception if
* updating the mappings on the master failed.
*/
void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception;

MappingUpdateResult(Engine.Index operation) {
Objects.requireNonNull(operation, "operation cannot be null");
this.operation = operation;
this.failure = null;
}

public boolean isFailed() {
return failure != null;
}
}
}

@ -65,6 +65,9 @@ import org.elasticsearch.indices.IndicesService;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.index.translog.Translog.Location;
|
||||
import org.elasticsearch.action.bulk.BulkItemResultHolder;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
@ -154,10 +157,23 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
}
|
||||
}
|
||||
|
||||
static Translog.Location calculateTranslogLocation(final Translog.Location originalLocation,
|
||||
final BulkItemResultHolder bulkItemResult) {
|
||||
final Engine.Result operationResult = bulkItemResult.operationResult;
|
||||
if (operationResult != null && operationResult.hasFailure() == false) {
|
||||
return locationToSync(originalLocation, operationResult.getTranslogLocation());
|
||||
} else {
|
||||
return originalLocation;
|
||||
}
|
||||
}
|
||||
|
||||
// Visible for unit testing
|
||||
static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResult,
|
||||
/**
|
||||
* Creates a BulkItemResponse for the primary operation and returns it. If no bulk response is
|
||||
* needed (because one already exists and the operation failed), then return null.
|
||||
*/
|
||||
static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResult,
|
||||
final DocWriteRequest.OpType opType,
|
||||
final Translog.Location originalLocation,
|
||||
BulkShardRequest request) {
|
||||
final Engine.Result operationResult = bulkItemResult.operationResult;
|
||||
final DocWriteResponse response = bulkItemResult.response;
|
||||
|
@ -165,16 +181,13 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
|
||||
if (operationResult == null) { // in case of noop update operation
|
||||
assert response.getResult() == DocWriteResponse.Result.NOOP : "only noop updates can have a null operation";
|
||||
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
|
||||
return originalLocation;
|
||||
return new BulkItemResponse(replicaRequest.id(), opType, response);
|
||||
|
||||
} else if (operationResult.hasFailure() == false) {
|
||||
BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
|
||||
replicaRequest.setPrimaryResponse(primaryResponse);
|
||||
// set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though.
|
||||
primaryResponse.getResponse().setShardInfo(new ShardInfo());
|
||||
// The operation was successful, advance the translog
|
||||
return locationToSync(originalLocation, operationResult.getTranslogLocation());
|
||||
return primaryResponse;
|
||||
|
||||
} else {
|
||||
DocWriteRequest docWriteRequest = replicaRequest.request();
|
||||
|
@ -187,19 +200,19 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
|
||||
}
|
||||
|
||||
|
||||
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
|
||||
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
|
||||
// then just use the response we got from the failed execution
|
||||
if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
|
||||
replicaRequest.setPrimaryResponse(
|
||||
new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
|
||||
// Make sure to use request.index() here, if you
|
||||
// use docWriteRequest.index() it will use the
|
||||
// concrete index instead of an alias if used!
|
||||
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
|
||||
return new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
|
||||
// Make sure to use request.index() here, if you
|
||||
// use docWriteRequest.index() it will use the
|
||||
// concrete index instead of an alias if used!
|
||||
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure));
|
||||
} else {
|
||||
assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response";
|
||||
return null;
|
||||
}
|
||||
return originalLocation;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -233,11 +246,14 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
// update the bulk item request because update request execution can mutate the bulk item request
|
||||
request.items()[requestIndex] = replicaRequest;
|
||||
|
||||
// Modify the replica request, if needed, and return a new translog location
|
||||
location = updateReplicaRequest(responseHolder, opType, location, request);
|
||||
// Retrieve the primary response, and update the replica request with the primary's response
|
||||
BulkItemResponse primaryResponse = createPrimaryResponse(responseHolder, opType, request);
|
||||
if (primaryResponse != null) {
|
||||
replicaRequest.setPrimaryResponse(primaryResponse);
|
||||
}
|
||||
|
||||
assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response";
|
||||
return location;
|
||||
// Update the translog with the new location, if needed
|
||||
return calculateTranslogLocation(location, responseHolder);
|
||||
}
|
||||
|
||||
private static boolean isConflictException(final Exception e) {
|
||||
|
@ -396,14 +412,16 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
return new WriteReplicaResult<>(request, location, null, replica, logger);
|
||||
}
|
||||
|
||||
private static Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
|
||||
/* here we are moving forward in the translog with each operation. Under the hood
|
||||
* this might cross translog files which is ok since from the user perspective
|
||||
* the translog is like a tape where only the highest location needs to be fsynced
|
||||
* in order to sync all previous locations even though they are not in the same file.
|
||||
* When the translog rolls over files the previous file is fsynced on after closing if needed.*/
|
||||
private static Translog.Location locationToSync(Translog.Location current,
|
||||
Translog.Location next) {
|
||||
/* here we are moving forward in the translog with each operation. Under the hood this might
|
||||
* cross translog files which is ok since from the user perspective the translog is like a
|
||||
* tape where only the highest location needs to be fsynced in order to sync all previous
|
||||
* locations even though they are not in the same file. When the translog rolls over files
|
||||
* the previous file is fsynced on after closing if needed.*/
|
||||
assert next != null : "next operation can't be null";
|
||||
assert current == null || current.compareTo(next) < 0 : "translog locations are not increasing";
|
||||
assert current == null || current.compareTo(next) < 0 :
|
||||
"translog locations are not increasing";
|
||||
return next;
|
||||
}
|
||||
|
||||
|
@ -411,45 +429,82 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
* Execute the given {@link IndexRequest} on a replica shard, throwing a
|
||||
* {@link RetryOnReplicaException} if the operation needs to be re-tried.
|
||||
*/
|
||||
public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException {
|
||||
final ShardId shardId = replica.shardId();
|
||||
SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source(),
|
||||
request.getContentType()).routing(request.routing()).parent(request.parent());
|
||||
public static Engine.IndexResult executeIndexRequestOnReplica(
|
||||
DocWriteResponse primaryResponse,
|
||||
IndexRequest request,
|
||||
IndexShard replica) throws IOException {
|
||||
|
||||
final Engine.Index operation;
|
||||
final long version = primaryResponse.getVersion();
|
||||
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
|
||||
assert versionType.validateVersionForWrites(version);
|
||||
final long seqNo = primaryResponse.getSeqNo();
|
||||
try {
|
||||
operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
operation = prepareIndexOperationOnReplica(primaryResponse, request, replica);
|
||||
} catch (MapperParsingException e) {
|
||||
return new Engine.IndexResult(e, version, seqNo);
|
||||
return new Engine.IndexResult(e, primaryResponse.getVersion(),
|
||||
primaryResponse.getSeqNo());
|
||||
}
|
||||
|
||||
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
if (update != null) {
|
||||
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
|
||||
final ShardId shardId = replica.shardId();
|
||||
throw new RetryOnReplicaException(shardId,
|
||||
"Mappings are not available on the replica yet, triggered update: " + update);
|
||||
}
|
||||
return replica.index(operation);
|
||||
}
|
||||
|
||||
/** Utility method to prepare an index operation on replica shards */
|
||||
static Engine.Index prepareIndexOperationOnReplica(
|
||||
DocWriteResponse primaryResponse,
|
||||
IndexRequest request,
|
||||
IndexShard replica) {
|
||||
|
||||
final ShardId shardId = replica.shardId();
|
||||
final long version = primaryResponse.getVersion();
|
||||
final long seqNo = primaryResponse.getSeqNo();
|
||||
final SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(),
|
||||
request.type(), request.id(), request.source(), request.getContentType())
|
||||
.routing(request.routing()).parent(request.parent());
|
||||
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
|
||||
assert versionType.validateVersionForWrites(version);
|
||||
|
||||
return replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType,
|
||||
request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
}
|
||||
|
||||
/** Utility method to prepare an index operation on primary shards */
|
||||
static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
|
||||
SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(),
|
||||
request.getContentType()).routing(request.routing()).parent(request.parent());
|
||||
return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
final SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(),
|
||||
request.id(), request.source(), request.getContentType())
|
||||
.routing(request.routing()).parent(request.parent());
|
||||
return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(),
|
||||
request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
}
|
||||
|
||||
/** Executes an index operation on the primary shard after updating the mapping if dynamic mapping updates are found */
|
||||
public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
|
||||
MappingUpdatePerformer mappingUpdater) throws Exception {
|
||||
MappingUpdatePerformer.MappingUpdateResult result = mappingUpdater.updateMappingsIfNeeded(primary, request);
|
||||
if (result.isFailed()) {
|
||||
return new Engine.IndexResult(result.failure, request.version());
|
||||
// Update the mappings if parsing the documents includes new dynamic updates
|
||||
try {
|
||||
final Engine.Index preUpdateOperation = prepareIndexOperationOnPrimary(request, primary);
|
||||
mappingUpdater.updateMappingsIfNeeded(preUpdateOperation, primary.shardId(), request.type());
|
||||
} catch (MapperParsingException | IllegalArgumentException failure) {
|
||||
return new Engine.IndexResult(failure, request.version());
|
||||
}
|
||||
return primary.index(result.operation);
|
||||
|
||||
// Verify that there are no more mappings that need to be applied. If there are failures, a
|
||||
// ReplicationOperation.RetryOnPrimaryException is thrown.
|
||||
final Engine.Index operation;
|
||||
try {
|
||||
operation = prepareIndexOperationOnPrimary(request, primary);
|
||||
mappingUpdater.verifyMappings(operation, primary.shardId());
|
||||
} catch (MapperParsingException | IllegalStateException e) {
|
||||
// there was an error in parsing the document that was not because
|
||||
// of pending mapping updates, so return a failure for the result
|
||||
return new Engine.IndexResult(e, request.version());
|
||||
}
|
||||
|
||||
return primary.index(operation);
|
||||
}
|
||||
|
||||
private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
|
||||
|
@ -468,36 +523,22 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
|
||||
class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {
|
||||
|
||||
@Nullable
|
||||
public MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception {
|
||||
Engine.Index operation;
|
||||
try {
|
||||
operation = prepareIndexOperationOnPrimary(request, primary);
|
||||
} catch (MapperParsingException | IllegalArgumentException e) {
|
||||
return new MappingUpdateResult(e);
|
||||
}
|
||||
public void updateMappingsIfNeeded(final Engine.Index operation, final ShardId shardId,
|
||||
final String type) throws Exception {
|
||||
final Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
final ShardId shardId = primary.shardId();
|
||||
if (update != null) {
|
||||
// can throw timeout exception when updating mappings or ISE for attempting to update default mappings
|
||||
// which are bubbled up
|
||||
try {
|
||||
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
|
||||
} catch (IllegalArgumentException e) {
|
||||
// throws IAE on conflicts merging dynamic mappings
|
||||
return new MappingUpdateResult(e);
|
||||
}
|
||||
try {
|
||||
operation = prepareIndexOperationOnPrimary(request, primary);
|
||||
} catch (MapperParsingException | IllegalArgumentException e) {
|
||||
return new MappingUpdateResult(e);
|
||||
}
|
||||
if (operation.parsedDoc().dynamicMappingsUpdate() != null) {
|
||||
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
|
||||
"Dynamic mappings are not available on the node that holds the primary yet");
|
||||
}
|
||||
// can throw timeout exception when updating mappings or ISE for attempting to
|
||||
// update default mappings which are bubbled up
|
||||
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update);
|
||||
}
|
||||
}
|
||||
|
||||
public void verifyMappings(final Engine.Index operation,
|
||||
final ShardId shardId) throws Exception {
|
||||
if (operation.parsedDoc().dynamicMappingsUpdate() != null) {
|
||||
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
|
||||
"Dynamic mappings are not available on the node that holds the primary yet");
|
||||
}
|
||||
return new MappingUpdateResult(operation);
|
||||
}
|
||||
}
|
||||
}
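As a side note, here is a minimal sketch of how the refactored primary indexing path above could be exercised with a stubbed MappingUpdatePerformer, for instance from a unit test in the same package. The `request` (IndexRequest) and `primary` (IndexShard) variables and the no-op behaviour of the stub are assumptions for illustration, not part of this change; the call site would also have to handle the checked Exception.

// Illustration only: hypothetical no-op stub of the interface shown above.
MappingUpdatePerformer noopUpdater = new MappingUpdatePerformer() {
    @Override
    public void updateMappingsIfNeeded(Engine.Index operation, ShardId shardId,
                                       String type) throws Exception {
        // pretend the master accepted any dynamic mapping update
    }

    @Override
    public void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception {
        // pretend no further dynamic mapping updates are pending
    }
};
// `request` and `primary` are assumed to exist in the surrounding test.
Engine.IndexResult result =
    TransportShardBulkAction.executeIndexRequestOnPrimary(request, primary, noopUpdater);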
@ -74,6 +74,8 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest<DeleteByQu
|
|||
}
|
||||
if (getSearchRequest() == null || getSearchRequest().source() == null) {
|
||||
e = addValidationError("source is missing", e);
|
||||
} else if (getSearchRequest().source().query() == null) {
|
||||
e = addValidationError("query is missing", e);
|
||||
}
|
||||
return e;
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
* Holds the responses as they come back. This uses {@link Tuple} as an "Either" style holder where only the response or the exception
|
||||
* is set.
|
||||
*/
|
||||
private final AtomicArray<Tuple<BulkByScrollResponse, Exception>> results;
|
||||
private final AtomicArray<Result> results;
|
||||
private final AtomicInteger counter;
|
||||
|
||||
public ParentBulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId, int slices) {
|
||||
|
@ -82,13 +82,11 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
}
|
||||
|
||||
private void addResultsToList(List<StatusOrException> sliceStatuses) {
|
||||
for (AtomicArray.Entry<Tuple<BulkByScrollResponse, Exception>> t : results.asList()) {
|
||||
if (t.value != null) {
|
||||
if (t.value.v1() != null) {
|
||||
sliceStatuses.set(t.index, new StatusOrException(t.value.v1().getStatus()));
|
||||
} else {
|
||||
sliceStatuses.set(t.index, new StatusOrException(t.value.v2()));
|
||||
}
|
||||
for (Result t : results.asList()) {
|
||||
if (t.response != null) {
|
||||
sliceStatuses.set(t.sliceId, new StatusOrException(t.response.getStatus()));
|
||||
} else {
|
||||
sliceStatuses.set(t.sliceId, new StatusOrException(t.failure));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -97,7 +95,7 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
* Record a response from a slice and respond to the listener if the request is finished.
|
||||
*/
|
||||
public void onSliceResponse(ActionListener<BulkByScrollResponse> listener, int sliceId, BulkByScrollResponse response) {
|
||||
results.setOnce(sliceId, new Tuple<>(response, null));
|
||||
results.setOnce(sliceId, new Result(sliceId, response));
|
||||
/* If the request isn't finished we could automatically rethrottle the sub-requests here but we would only want to do that if we
|
||||
* were fairly sure they had a while left to go. */
|
||||
recordSliceCompletionAndRespondIfAllDone(listener);
|
||||
|
@ -107,7 +105,7 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
* Record a failure from a slice and respond to the listener if the request is finished.
|
||||
*/
|
||||
void onSliceFailure(ActionListener<BulkByScrollResponse> listener, int sliceId, Exception e) {
|
||||
results.setOnce(sliceId, new Tuple<>(null, e));
|
||||
results.setOnce(sliceId, new Result(sliceId, e));
|
||||
recordSliceCompletionAndRespondIfAllDone(listener);
|
||||
// TODO cancel when a slice fails?
|
||||
}
|
||||
|
@ -118,17 +116,17 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
}
|
||||
List<BulkByScrollResponse> responses = new ArrayList<>(results.length());
|
||||
Exception exception = null;
|
||||
for (AtomicArray.Entry<Tuple<BulkByScrollResponse, Exception>> t : results.asList()) {
|
||||
if (t.value.v1() == null) {
|
||||
assert t.value.v2() != null : "exception shouldn't be null if value is null";
|
||||
for (Result t : results.asList()) {
|
||||
if (t.response == null) {
|
||||
assert t.failure != null : "exception shouldn't be null if value is null";
|
||||
if (exception == null) {
|
||||
exception = t.value.v2();
|
||||
exception = t.failure;
|
||||
} else {
|
||||
exception.addSuppressed(t.value.v2());
|
||||
exception.addSuppressed(t.failure);
|
||||
}
|
||||
} else {
|
||||
assert t.value.v2() == null : "exception should be null if response is not null";
|
||||
responses.add(t.value.v1());
|
||||
assert t.failure == null : "exception should be null if response is not null";
|
||||
responses.add(t.response);
|
||||
}
|
||||
}
|
||||
if (exception == null) {
|
||||
|
@ -138,4 +136,21 @@ public class ParentBulkByScrollTask extends BulkByScrollTask {
|
|||
}
|
||||
}
|
||||
|
||||
private static final class Result {
|
||||
final BulkByScrollResponse response;
|
||||
final int sliceId;
|
||||
final Exception failure;
|
||||
|
||||
private Result(int sliceId, BulkByScrollResponse response) {
|
||||
this.sliceId = sliceId;
|
||||
this.response = response;
|
||||
failure = null;
|
||||
}
|
||||
|
||||
private Result(int sliceId, Exception failure) {
|
||||
this.sliceId = sliceId;
|
||||
this.failure = failure;
|
||||
response = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,282 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
|
||||
/**
|
||||
* Describes the capabilities of a field optionally merged across multiple indices.
|
||||
*/
|
||||
public class FieldCapabilities implements Writeable, ToXContent {
|
||||
private final String name;
|
||||
private final String type;
|
||||
private final boolean isSearchable;
|
||||
private final boolean isAggregatable;
|
||||
|
||||
private final String[] indices;
|
||||
private final String[] nonSearchableIndices;
|
||||
private final String[] nonAggregatableIndices;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param name The name of the field.
|
||||
* @param type The type associated with the field.
|
||||
* @param isSearchable Whether this field is indexed for search.
|
||||
* @param isAggregatable Whether this field can be aggregated on.
|
||||
*/
|
||||
FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) {
|
||||
this(name, type, isSearchable, isAggregatable, null, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param name The name of the field
|
||||
* @param type The type associated with the field.
|
||||
* @param isSearchable Whether this field is indexed for search.
|
||||
* @param isAggregatable Whether this field can be aggregated on.
|
||||
* @param indices The list of indices where this field name is defined as {@code type},
|
||||
* or null if all indices have the same {@code type} for the field.
|
||||
* @param nonSearchableIndices The list of indices where this field is not searchable,
|
||||
* or null if the field is searchable in all indices.
|
||||
* @param nonAggregatableIndices The list of indices where this field is not aggregatable,
|
||||
* or null if the field is aggregatable in all indices.
|
||||
*/
|
||||
FieldCapabilities(String name, String type,
|
||||
boolean isSearchable, boolean isAggregatable,
|
||||
String[] indices,
|
||||
String[] nonSearchableIndices,
|
||||
String[] nonAggregatableIndices) {
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.isSearchable = isSearchable;
|
||||
this.isAggregatable = isAggregatable;
|
||||
this.indices = indices;
|
||||
this.nonSearchableIndices = nonSearchableIndices;
|
||||
this.nonAggregatableIndices = nonAggregatableIndices;
|
||||
}
|
||||
|
||||
FieldCapabilities(StreamInput in) throws IOException {
|
||||
this.name = in.readString();
|
||||
this.type = in.readString();
|
||||
this.isSearchable = in.readBoolean();
|
||||
this.isAggregatable = in.readBoolean();
|
||||
this.indices = in.readOptionalStringArray();
|
||||
this.nonSearchableIndices = in.readOptionalStringArray();
|
||||
this.nonAggregatableIndices = in.readOptionalStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeString(type);
|
||||
out.writeBoolean(isSearchable);
|
||||
out.writeBoolean(isAggregatable);
|
||||
out.writeOptionalStringArray(indices);
|
||||
out.writeOptionalStringArray(nonSearchableIndices);
|
||||
out.writeOptionalStringArray(nonAggregatableIndices);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field("type", type);
|
||||
builder.field("searchable", isSearchable);
|
||||
builder.field("aggregatable", isAggregatable);
|
||||
if (indices != null) {
|
||||
builder.field("indices", indices);
|
||||
}
|
||||
if (nonSearchableIndices != null) {
|
||||
builder.field("non_searchable_indices", nonSearchableIndices);
|
||||
}
|
||||
if (nonAggregatableIndices != null) {
|
||||
builder.field("non_aggregatable_indices", nonAggregatableIndices);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* The name of the field.
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether this field can be aggregated on all indices.
|
||||
*/
|
||||
public boolean isAggregatable() {
|
||||
return isAggregatable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether this field is indexed for search on all indices.
|
||||
*/
|
||||
public boolean isSearchable() {
|
||||
return isSearchable;
|
||||
}
|
||||
|
||||
/**
|
||||
* The type of the field.
|
||||
*/
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of indices where this field name is defined as {@code type},
|
||||
* or null if all indices have the same {@code type} for the field.
|
||||
*/
|
||||
public String[] indices() {
|
||||
return indices;
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of indices where this field is not searchable,
|
||||
* or null if the field is searchable in all indices.
|
||||
*/
|
||||
public String[] nonSearchableIndices() {
|
||||
return nonSearchableIndices;
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of indices where this field is not aggregatable,
|
||||
* or null if the field is aggregatable in all indices.
|
||||
*/
|
||||
public String[] nonAggregatableIndices() {
|
||||
return nonAggregatableIndices;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
FieldCapabilities that = (FieldCapabilities) o;
|
||||
|
||||
if (isSearchable != that.isSearchable) return false;
|
||||
if (isAggregatable != that.isAggregatable) return false;
|
||||
if (!name.equals(that.name)) return false;
|
||||
if (!type.equals(that.type)) return false;
|
||||
if (!Arrays.equals(indices, that.indices)) return false;
|
||||
if (!Arrays.equals(nonSearchableIndices, that.nonSearchableIndices)) return false;
|
||||
return Arrays.equals(nonAggregatableIndices, that.nonAggregatableIndices);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = name.hashCode();
|
||||
result = 31 * result + type.hashCode();
|
||||
result = 31 * result + (isSearchable ? 1 : 0);
|
||||
result = 31 * result + (isAggregatable ? 1 : 0);
|
||||
result = 31 * result + Arrays.hashCode(indices);
|
||||
result = 31 * result + Arrays.hashCode(nonSearchableIndices);
|
||||
result = 31 * result + Arrays.hashCode(nonAggregatableIndices);
|
||||
return result;
|
||||
}
|
||||
|
||||
static class Builder {
|
||||
private String name;
|
||||
private String type;
|
||||
private boolean isSearchable;
|
||||
private boolean isAggregatable;
|
||||
private List<IndexCaps> indiceList;
|
||||
|
||||
Builder(String name, String type) {
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.isSearchable = true;
|
||||
this.isAggregatable = true;
|
||||
this.indiceList = new ArrayList<>();
|
||||
}
|
||||
|
||||
void add(String index, boolean search, boolean agg) {
|
||||
IndexCaps indexCaps = new IndexCaps(index, search, agg);
|
||||
indiceList.add(indexCaps);
|
||||
this.isSearchable &= search;
|
||||
this.isAggregatable &= agg;
|
||||
}
|
||||
|
||||
FieldCapabilities build(boolean withIndices) {
|
||||
final String[] indices;
|
||||
/* Eclipse can't deal with o -> o.name, maybe because of
|
||||
* https://bugs.eclipse.org/bugs/show_bug.cgi?id=511750 */
|
||||
Collections.sort(indiceList, Comparator.comparing((IndexCaps o) -> o.name));
|
||||
if (withIndices) {
|
||||
indices = indiceList.stream()
|
||||
.map(caps -> caps.name)
|
||||
.toArray(String[]::new);
|
||||
} else {
|
||||
indices = null;
|
||||
}
|
||||
|
||||
final String[] nonSearchableIndices;
|
||||
if (isSearchable == false &&
|
||||
indiceList.stream().anyMatch((caps) -> caps.isSearchable)) {
|
||||
// Iff this field is searchable in some indices AND non-searchable in others
|
||||
// we record the list of non-searchable indices
|
||||
nonSearchableIndices = indiceList.stream()
|
||||
.filter((caps) -> caps.isSearchable == false)
|
||||
.map(caps -> caps.name)
|
||||
.toArray(String[]::new);
|
||||
} else {
|
||||
nonSearchableIndices = null;
|
||||
}
|
||||
|
||||
final String[] nonAggregatableIndices;
|
||||
if (isAggregatable == false &&
|
||||
indiceList.stream().anyMatch((caps) -> caps.isAggregatable)) {
|
||||
// Iff this field is aggregatable in some indices AND non-aggregatable in others
|
||||
// we keep the list of non-aggregatable indices
|
||||
nonAggregatableIndices = indiceList.stream()
|
||||
.filter((caps) -> caps.isAggregatable == false)
|
||||
.map(caps -> caps.name)
|
||||
.toArray(String[]::new);
|
||||
} else {
|
||||
nonAggregatableIndices = null;
|
||||
}
|
||||
return new FieldCapabilities(name, type, isSearchable, isAggregatable,
|
||||
indices, nonSearchableIndices, nonAggregatableIndices);
|
||||
}
|
||||
}
|
||||
|
||||
private static class IndexCaps {
|
||||
final String name;
|
||||
final boolean isSearchable;
|
||||
final boolean isAggregatable;
|
||||
|
||||
IndexCaps(String name, boolean isSearchable, boolean isAggregatable) {
|
||||
this.name = name;
|
||||
this.isSearchable = isSearchable;
|
||||
this.isAggregatable = isAggregatable;
|
||||
}
|
||||
}
|
||||
}
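As an aside, a small sketch of how the package-private Builder above merges per-index capabilities. The field and index names are made up, and such code would have to sit in the org.elasticsearch.action.fieldcaps package (for example a unit test) with java.util.Arrays imported.

// Illustration only: hypothetical field "title" mapped as "text" in two indices.
FieldCapabilities.Builder builder = new FieldCapabilities.Builder("title", "text");
builder.add("index-1", true, false); // searchable, but not aggregatable, in index-1
builder.add("index-2", true, true);  // searchable and aggregatable in index-2

FieldCapabilities merged = builder.build(true); // true == record the per-index breakdown
assert merged.isSearchable();                         // searchable in every index
assert merged.isAggregatable() == false;              // not aggregatable in every index
assert Arrays.equals(merged.indices(), new String[] {"index-1", "index-2"});
assert merged.nonSearchableIndices() == null;         // null: searchable in all indices
assert Arrays.equals(merged.nonAggregatableIndices(), new String[] {"index-1"});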
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class FieldCapabilitiesAction extends Action<FieldCapabilitiesRequest,
|
||||
FieldCapabilitiesResponse, FieldCapabilitiesRequestBuilder> {
|
||||
|
||||
public static final FieldCapabilitiesAction INSTANCE = new FieldCapabilitiesAction();
|
||||
public static final String NAME = "indices:data/read/field_caps";
|
||||
|
||||
private FieldCapabilitiesAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldCapabilitiesResponse newResponse() {
|
||||
return new FieldCapabilitiesResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldCapabilitiesRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new FieldCapabilitiesRequestBuilder(client, this);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class FieldCapabilitiesIndexRequest
|
||||
extends SingleShardRequest<FieldCapabilitiesIndexRequest> {
|
||||
|
||||
private String[] fields;
|
||||
|
||||
// For serialization
|
||||
FieldCapabilitiesIndexRequest() {}
|
||||
|
||||
FieldCapabilitiesIndexRequest(String[] fields, String index) {
|
||||
super(index);
|
||||
if (fields == null || fields.length == 0) {
|
||||
throw new IllegalArgumentException("specified fields can't be null or empty");
|
||||
}
|
||||
this.fields = fields;
|
||||
}
|
||||
|
||||
public String[] fields() {
|
||||
return fields;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
fields = in.readStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Response for {@link FieldCapabilitiesIndexRequest} requests.
|
||||
*/
|
||||
public class FieldCapabilitiesIndexResponse extends ActionResponse {
|
||||
private String indexName;
|
||||
private Map<String, FieldCapabilities> responseMap;
|
||||
|
||||
FieldCapabilitiesIndexResponse(String indexName, Map<String, FieldCapabilities> responseMap) {
|
||||
this.indexName = indexName;
|
||||
this.responseMap = responseMap;
|
||||
}
|
||||
|
||||
FieldCapabilitiesIndexResponse() {
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get the index name
|
||||
*/
|
||||
public String getIndexName() {
|
||||
return indexName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the field capabilities map
|
||||
*/
|
||||
public Map<String, FieldCapabilities> get() {
|
||||
return responseMap;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* Get the field capabilities for the provided {@code field}
|
||||
*/
|
||||
public FieldCapabilities getField(String field) {
|
||||
return responseMap.get(field);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
this.indexName = in.readString();
|
||||
this.responseMap =
|
||||
in.readMap(StreamInput::readString, FieldCapabilities::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(indexName);
|
||||
out.writeMap(responseMap,
|
||||
StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
FieldCapabilitiesIndexResponse that = (FieldCapabilitiesIndexResponse) o;
|
||||
|
||||
return responseMap.equals(that.responseMap);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return responseMap.hashCode();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,144 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.ValidateActions;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
|
||||
|
||||
public class FieldCapabilitiesRequest extends ActionRequest
|
||||
implements IndicesRequest.Replaceable {
|
||||
public static final ParseField FIELDS_FIELD = new ParseField("fields");
|
||||
public static final String NAME = "field_caps_request";
|
||||
private String[] indices = Strings.EMPTY_ARRAY;
|
||||
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
|
||||
private String[] fields = Strings.EMPTY_ARRAY;
|
||||
|
||||
private static ObjectParser<FieldCapabilitiesRequest, Void> PARSER =
|
||||
new ObjectParser<>(NAME, FieldCapabilitiesRequest::new);
|
||||
|
||||
static {
|
||||
PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields),
|
||||
FIELDS_FIELD);
|
||||
}
|
||||
|
||||
public FieldCapabilitiesRequest() {}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
fields = in.readStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(fields);
|
||||
}
|
||||
|
||||
public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException {
|
||||
return PARSER.parse(parser, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of field names to retrieve
|
||||
*/
|
||||
public FieldCapabilitiesRequest fields(String... fields) {
|
||||
if (fields == null || fields.length == 0) {
|
||||
throw new IllegalArgumentException("specified fields can't be null or empty");
|
||||
}
|
||||
Set<String> fieldSet = new HashSet<>(Arrays.asList(fields));
|
||||
this.fields = fieldSet.toArray(new String[0]);
|
||||
return this;
|
||||
}
|
||||
|
||||
public String[] fields() {
|
||||
return fields;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* The list of indices to lookup
|
||||
*/
|
||||
public FieldCapabilitiesRequest indices(String... indices) {
|
||||
this.indices = indices;
|
||||
return this;
|
||||
}
|
||||
|
||||
public FieldCapabilitiesRequest indicesOptions(IndicesOptions indicesOptions) {
|
||||
this.indicesOptions = indicesOptions;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] indices() {
|
||||
return indices;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndicesOptions indicesOptions() {
|
||||
return indicesOptions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
ActionRequestValidationException validationException = null;
|
||||
if (fields == null || fields.length == 0) {
|
||||
validationException =
|
||||
ValidateActions.addValidationError("no fields specified", validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o;
|
||||
|
||||
if (!Arrays.equals(indices, that.indices)) return false;
|
||||
if (!indicesOptions.equals(that.indicesOptions)) return false;
|
||||
return Arrays.equals(fields, that.fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = Arrays.hashCode(indices);
|
||||
result = 31 * result + indicesOptions.hashCode();
|
||||
result = 31 * result + Arrays.hashCode(fields);
|
||||
return result;
|
||||
}
|
||||
}
|
|
@ -17,25 +17,25 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.query;
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public abstract class QuerySearchResultProvider extends TransportResponse implements SearchPhaseResult {
|
||||
|
||||
/**
|
||||
* Returns the query result iff it's included in this response otherwise <code>null</code>
|
||||
*/
|
||||
public QuerySearchResult queryResult() {
|
||||
return null;
|
||||
public class FieldCapabilitiesRequestBuilder extends
|
||||
ActionRequestBuilder<FieldCapabilitiesRequest, FieldCapabilitiesResponse,
|
||||
FieldCapabilitiesRequestBuilder> {
|
||||
public FieldCapabilitiesRequestBuilder(ElasticsearchClient client,
|
||||
FieldCapabilitiesAction action,
|
||||
String... indices) {
|
||||
super(client, action, new FieldCapabilitiesRequest().indices(indices));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the fetch result iff it's included in this response otherwise <code>null</code>
|
||||
* The list of field names to retrieve.
|
||||
*/
|
||||
public FetchSearchResult fetchResult() {
|
||||
return null;
|
||||
public FieldCapabilitiesRequestBuilder setFields(String... fields) {
|
||||
request().fields(fields);
|
||||
return this;
|
||||
}
|
||||
}
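For orientation, a hedged usage sketch of the new request builder from client code. The `client` variable (any connected ElasticsearchClient), the index pattern and the field names are assumptions for illustration, not part of this change; java.util.Map is assumed to be imported.

// Illustration only: ask for the capabilities of two hypothetical fields across "logs-*" indices.
FieldCapabilitiesResponse response =
    new FieldCapabilitiesRequestBuilder(client, FieldCapabilitiesAction.INSTANCE, "logs-*")
        .setFields("message", "@timestamp")
        .get();

// The merged response is keyed by field name and then by mapping type.
for (Map.Entry<String, FieldCapabilities> entry : response.getField("message").entrySet()) {
    FieldCapabilities caps = entry.getValue();
    System.out.println(caps.getName() + " as " + entry.getKey()
        + ": searchable=" + caps.isSearchable()
        + ", aggregatable=" + caps.isAggregatable());
}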
@ -0,0 +1,106 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Response for {@link FieldCapabilitiesRequest} requests.
|
||||
*/
|
||||
public class FieldCapabilitiesResponse extends ActionResponse implements ToXContent {
|
||||
private Map<String, Map<String, FieldCapabilities>> responseMap;
|
||||
|
||||
FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap) {
|
||||
this.responseMap = responseMap;
|
||||
}
|
||||
|
||||
/**
|
||||
* Used for serialization
|
||||
*/
|
||||
FieldCapabilitiesResponse() {
|
||||
this.responseMap = Collections.emptyMap();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the field capabilities map.
|
||||
*/
|
||||
public Map<String, Map<String, FieldCapabilities>> get() {
|
||||
return responseMap;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* Get the field capabilities per type for the provided {@code field}.
|
||||
*/
|
||||
public Map<String, FieldCapabilities> getField(String field) {
|
||||
return responseMap.get(field);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
this.responseMap =
|
||||
in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
|
||||
}
|
||||
|
||||
private static Map<String, FieldCapabilities> readField(StreamInput in) throws IOException {
|
||||
return in.readMap(StreamInput::readString, FieldCapabilities::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
|
||||
}
|
||||
|
||||
private static void writeField(StreamOutput out,
|
||||
Map<String, FieldCapabilities> map) throws IOException {
|
||||
out.writeMap(map, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut));
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field("fields", responseMap);
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
FieldCapabilitiesResponse that = (FieldCapabilitiesResponse) o;
|
||||
|
||||
return responseMap.equals(that.responseMap);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return responseMap.hashCode();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,134 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReferenceArray;
|
||||
|
||||
public class TransportFieldCapabilitiesAction
|
||||
extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
|
||||
private final ClusterService clusterService;
|
||||
private final TransportFieldCapabilitiesIndexAction shardAction;
|
||||
|
||||
@Inject
|
||||
public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService,
|
||||
ClusterService clusterService, ThreadPool threadPool,
|
||||
TransportFieldCapabilitiesIndexAction shardAction,
|
||||
ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver
|
||||
indexNameExpressionResolver) {
|
||||
super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService,
|
||||
actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.shardAction = shardAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(FieldCapabilitiesRequest request,
|
||||
final ActionListener<FieldCapabilitiesResponse> listener) {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] concreteIndices =
|
||||
indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
final AtomicInteger indexCounter = new AtomicInteger();
|
||||
final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
|
||||
final AtomicReferenceArray<Object> indexResponses =
|
||||
new AtomicReferenceArray<>(concreteIndices.length);
|
||||
if (concreteIndices.length == 0) {
|
||||
listener.onResponse(new FieldCapabilitiesResponse());
|
||||
} else {
|
||||
for (String index : concreteIndices) {
|
||||
FieldCapabilitiesIndexRequest indexRequest =
|
||||
new FieldCapabilitiesIndexRequest(request.fields(), index);
|
||||
shardAction.execute(indexRequest,
|
||||
new ActionListener<FieldCapabilitiesIndexResponse> () {
|
||||
@Override
|
||||
public void onResponse(FieldCapabilitiesIndexResponse result) {
|
||||
indexResponses.set(indexCounter.getAndIncrement(), result);
|
||||
if (completionCounter.decrementAndGet() == 0) {
|
||||
listener.onResponse(merge(indexResponses));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
indexResponses.set(indexCounter.getAndIncrement(), e);
|
||||
if (completionCounter.decrementAndGet() == 0) {
|
||||
listener.onResponse(merge(indexResponses));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private FieldCapabilitiesResponse merge(AtomicReferenceArray<Object> indexResponses) {
|
||||
Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder = new HashMap<> ();
|
||||
for (int i = 0; i < indexResponses.length(); i++) {
|
||||
Object element = indexResponses.get(i);
|
||||
if (element instanceof FieldCapabilitiesIndexResponse == false) {
|
||||
assert element instanceof Exception;
|
||||
continue;
|
||||
}
|
||||
FieldCapabilitiesIndexResponse response = (FieldCapabilitiesIndexResponse) element;
|
||||
for (String field : response.get().keySet()) {
|
||||
Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.get(field);
|
||||
if (typeMap == null) {
|
||||
typeMap = new HashMap<> ();
|
||||
responseMapBuilder.put(field, typeMap);
|
||||
}
|
||||
FieldCapabilities fieldCap = response.getField(field);
|
||||
FieldCapabilities.Builder builder = typeMap.get(fieldCap.getType());
|
||||
if (builder == null) {
|
||||
builder = new FieldCapabilities.Builder(field, fieldCap.getType());
|
||||
typeMap.put(fieldCap.getType(), builder);
|
||||
}
|
||||
builder.add(response.getIndexName(),
|
||||
fieldCap.isSearchable(), fieldCap.isAggregatable());
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
|
||||
for (Map.Entry<String, Map<String, FieldCapabilities.Builder>> entry :
|
||||
responseMapBuilder.entrySet()) {
|
||||
Map<String, FieldCapabilities> typeMap = new HashMap<>();
|
||||
boolean multiTypes = entry.getValue().size() > 1;
|
||||
for (Map.Entry<String, FieldCapabilities.Builder> fieldEntry :
|
||||
entry.getValue().entrySet()) {
|
||||
typeMap.put(fieldEntry.getKey(), fieldEntry.getValue().build(multiTypes));
|
||||
}
|
||||
responseMap.put(entry.getKey(), typeMap);
|
||||
}
|
||||
|
||||
return new FieldCapabilitiesResponse(responseMap);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.fieldcaps;
|
||||
|
||||
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TransportFieldCapabilitiesIndexAction
        extends TransportSingleShardAction<FieldCapabilitiesIndexRequest,
        FieldCapabilitiesIndexResponse> {

    private static final String ACTION_NAME = FieldCapabilitiesAction.NAME + "[index]";

    protected final ClusterService clusterService;
    private final IndicesService indicesService;

    @Inject
    public TransportFieldCapabilitiesIndexAction(Settings settings,
                                                 ClusterService clusterService,
                                                 TransportService transportService,
                                                 IndicesService indicesService,
                                                 ThreadPool threadPool,
                                                 ActionFilters actionFilters,
                                                 IndexNameExpressionResolver
                                                     indexNameExpressionResolver) {
        super(settings,
            ACTION_NAME,
            threadPool,
            clusterService,
            transportService,
            actionFilters,
            indexNameExpressionResolver,
            FieldCapabilitiesIndexRequest::new,
            ThreadPool.Names.MANAGEMENT);
        this.clusterService = clusterService;
        this.indicesService = indicesService;
    }

    @Override
    protected boolean resolveIndex(FieldCapabilitiesIndexRequest request) {
        // internal action, index already resolved
        return false;
    }

    @Override
    protected ShardsIterator shards(ClusterState state, InternalRequest request) {
        // Will balance requests between shards
        // Resolve patterns and deduplicate
        return state.routingTable().index(request.concreteIndex()).randomAllActiveShardsIt();
    }

    @Override
    protected FieldCapabilitiesIndexResponse shardOperation(
            final FieldCapabilitiesIndexRequest request,
            ShardId shardId) {
        MapperService mapperService =
            indicesService.indexServiceSafe(shardId.getIndex()).mapperService();
        Set<String> fieldNames = new HashSet<>();
        for (String field : request.fields()) {
            fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
        }
        Map<String, FieldCapabilities> responseMap = new HashMap<>();
        for (String field : fieldNames) {
            MappedFieldType ft = mapperService.fullName(field);
            FieldCapabilities fieldCap = new FieldCapabilities(field,
                ft.typeName(),
                ft.isSearchable(),
                ft.isAggregatable());
            responseMap.put(field, fieldCap);
        }
        return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap);
    }

    @Override
    protected FieldCapabilitiesIndexResponse newResponse() {
        return new FieldCapabilitiesIndexResponse();
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state,
                                                      InternalRequest request) {
        return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ,
            request.concreteIndex());
    }
}
@@ -44,6 +44,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;

public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {

@@ -319,6 +320,14 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
                             boolean allowExplicitIndex) throws IOException {
        XContentParser.Token token;
        String currentFieldName = null;
        if ((token = parser.nextToken()) != XContentParser.Token.START_OBJECT) {
            final String message = String.format(
                Locale.ROOT,
                "unexpected token [%s], expected [%s]",
                token,
                XContentParser.Token.START_OBJECT);
            throw new ParsingException(parser.getTokenLocation(), message);
        }
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();

@@ -327,7 +336,22 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
                    parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex);
                } else if ("ids".equals(currentFieldName)) {
                    parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting);
                } else {
                    final String message = String.format(
                        Locale.ROOT,
                        "unknown key [%s] for a %s, expected [docs] or [ids]",
                        currentFieldName,
                        token);
                    throw new ParsingException(parser.getTokenLocation(), message);
                }
            } else {
                final String message = String.format(
                    Locale.ROOT,
                    "unexpected token [%s], expected [%s] or [%s]",
                    token,
                    XContentParser.Token.FIELD_NAME,
                    XContentParser.Token.START_ARRAY);
                throw new ParsingException(parser.getTokenLocation(), message);
            }
        }
        return this;
@@ -131,7 +131,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
        } else {
            if (logger.isTraceEnabled()) {
                final String resultsFrom = results.getSuccessfulResults()
                    .map(r -> r.shardTarget().toString()).collect(Collectors.joining(","));
                    .map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(","));
                logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
                    currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion);
            }

@@ -159,10 +159,10 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        List<ShardSearchFailure> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
            failures[i] = entries.get(i);
        }
        return failures;
    }

@@ -209,8 +209,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
    private void raisePhaseFailure(SearchPhaseExecutionException exception) {
        results.getSuccessfulResults().forEach((entry) -> {
            try {
                Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId());
                sendReleaseSearchContext(entry.id(), connection);
                Transport.Connection connection = nodeIdToConnection.apply(entry.getSearchShardTarget().getNodeId());
                sendReleaseSearchContext(entry.getRequestId(), connection);
            } catch (Exception inner) {
                inner.addSuppressed(exception);
                logger.trace("failed to release context", inner);

@@ -220,18 +220,18 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
    }

    @Override
    public final void onShardSuccess(int shardIndex, Result result) {
    public final void onShardSuccess(Result result) {
        successfulOps.incrementAndGet();
        results.consumeResult(shardIndex, result);
        results.consumeResult(result);
        if (logger.isTraceEnabled()) {
            logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
            logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null);
        }
        // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
        // so its ok concurrency wise to miss potentially the shard failures being created because of another failure
        // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
        if (shardFailures != null) {
            shardFailures.set(shardIndex, null);
            shardFailures.set(result.getShardIndex(), null);
        }
    }
@@ -23,18 +23,20 @@ import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;

import java.util.function.Consumer;

/**
 * This is a simple base class to simplify fan out to shards and collect their results. Each results passed to
 * {@link #onResult(int, SearchPhaseResult, SearchShardTarget)} will be set to the provided result array
 * {@link #onResult(SearchPhaseResult)} will be set to the provided result array
 * where the given index is used to set the result on the array.
 */
final class CountedCollector<R extends SearchPhaseResult> {
    private final ResultConsumer<R> resultConsumer;
    private final Consumer<R> resultConsumer;
    private final CountDown counter;
    private final Runnable onFinish;
    private final SearchPhaseContext context;

    CountedCollector(ResultConsumer<R> resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
    CountedCollector(Consumer<R> resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
        this.resultConsumer = resultConsumer;
        this.counter = new CountDown(expectedOps);
        this.onFinish = onFinish;

@@ -55,10 +57,9 @@ final class CountedCollector<R extends SearchPhaseResult> {
    /**
     * Sets the result to the given array index and then runs {@link #countDown()}
     */
    void onResult(int index, R result, SearchShardTarget target) {
    void onResult(R result) {
        try {
            result.shardTarget(target);
            resultConsumer.consume(index, result);
            resultConsumer.accept(result);
        } finally {
            countDown();
        }

@@ -75,12 +76,4 @@ final class CountedCollector<R extends SearchPhaseResult> {
        countDown();
    }
}

    /**
     * A functional interface to plug in shard result consumers to this collector
     */
    @FunctionalInterface
    public interface ResultConsumer<R extends SearchPhaseResult> {
        void consume(int shardIndex, R result);
    }
}
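For readers skimming the diff, the CountedCollector change above boils down to a fan-out/count-down pattern: every shard response now carries its own shard index, so the collector only needs a plain Consumer plus a countdown that runs a completion callback once each expected shard has reported back. The following is a minimal, self-contained sketch of that pattern; SimpleCountedCollector is a hypothetical name and this is not the Elasticsearch class (the real CountedCollector also records failures through the SearchPhaseContext).

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Minimal sketch of the fan-out/count-down pattern: results are consumed as
// they arrive, and onFinish runs exactly once after every shard has answered.
final class SimpleCountedCollector<R> {
    private final Consumer<R> resultConsumer; // invoked once per successful shard result
    private final AtomicInteger counter;      // remaining shard responses (success or failure)
    private final Runnable onFinish;          // run once when every shard has reported back

    SimpleCountedCollector(Consumer<R> resultConsumer, int expectedOps, Runnable onFinish) {
        this.resultConsumer = resultConsumer;
        this.counter = new AtomicInteger(expectedOps);
        this.onFinish = onFinish;
    }

    void onResult(R result) {
        try {
            resultConsumer.accept(result);
        } finally {
            countDown();
        }
    }

    void onFailure() {
        countDown(); // failed shards still count towards completion
    }

    private void countDown() {
        if (counter.decrementAndGet() == 0) {
            onFinish.run(); // all expected operations finished, move to the next phase
        }
    }

    public static void main(String[] args) {
        SimpleCountedCollector<String> collector = new SimpleCountedCollector<>(
            r -> System.out.println("got " + r), 3, () -> System.out.println("done"));
        collector.onResult("shard-0");
        collector.onFailure();         // a failure still counts down
        collector.onResult("shard-2"); // prints "done" after the last response
    }
}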
@@ -20,16 +20,17 @@ package org.elasticsearch.action.search;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.transport.Transport;

import java.io.IOException;
import java.util.List;
import java.util.function.Function;

/**

@@ -40,16 +41,16 @@ import java.util.function.Function;
 * @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
 */
final class DfsQueryPhase extends SearchPhase {
    private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> queryResult;
    private final InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> queryResult;
    private final SearchPhaseController searchPhaseController;
    private final AtomicArray<DfsSearchResult> dfsSearchResults;
    private final Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
    private final Function<InitialSearchPhase.SearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final SearchTransportService searchTransportService;

    DfsQueryPhase(AtomicArray<DfsSearchResult> dfsSearchResults,
                  SearchPhaseController searchPhaseController,
                  Function<InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory,
                  Function<InitialSearchPhase.SearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory,
                  SearchPhaseContext context) {
        super("dfs_query");
        this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards());

@@ -64,22 +65,26 @@ final class DfsQueryPhase extends SearchPhase {
    public void run() throws IOException {
        // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs
        // to free up memory early
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults);
        final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult::consumeResult,
            dfsSearchResults.asList().size(),
            () -> {
                context.executeNextPhase(this, nextPhaseFactory.apply(queryResult));
            }, context);
        for (final AtomicArray.Entry<DfsSearchResult> entry : dfsSearchResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            final int shardIndex = entry.index;
            final SearchShardTarget searchShardTarget = dfsResult.shardTarget();
        final List<DfsSearchResult> resultList = dfsSearchResults.asList();
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(resultList);
        final CountedCollector<SearchPhaseResult> counter = new CountedCollector<>(queryResult::consumeResult,
            resultList.size(),
            () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), context);
        for (final DfsSearchResult dfsResult : resultList) {
            final SearchShardTarget searchShardTarget = dfsResult.getSearchShardTarget();
            Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.id(), dfs);
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.getRequestId(), dfs);
            final int shardIndex = dfsResult.getShardIndex();
            searchTransportService.sendExecuteQuery(connection, querySearchRequest, context.getTask(),
                ActionListener.wrap(
                    result -> counter.onResult(shardIndex, result, searchShardTarget),
                    exception -> {
                new SearchActionListener<QuerySearchResult>(searchShardTarget, shardIndex) {

                    @Override
                    protected void innerOnResponse(QuerySearchResult response) {
                        counter.onResult(response);
                    }

                    @Override
                    public void onFailure(Exception exception) {
                        try {
                            if (context.getLogger().isDebugEnabled()) {
                                context.getLogger().debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase",

@@ -92,7 +97,8 @@ final class DfsQueryPhase extends SearchPhase {
                            // release it again to be in the safe side
                            context.sendReleaseSearchContext(querySearchRequest.id(), connection);
                        }
                    }));
                    }
                });
        }
    }
}
@@ -23,15 +23,14 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;

import java.io.IOException;

@@ -45,13 +44,13 @@ import java.util.function.Function;
final class FetchSearchPhase extends SearchPhase {
    private final AtomicArray<FetchSearchResult> fetchResults;
    private final SearchPhaseController searchPhaseController;
    private final AtomicArray<QuerySearchResultProvider> queryResults;
    private final AtomicArray<SearchPhaseResult> queryResults;
    private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final Logger logger;
    private final InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer;
    private final InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> resultConsumer;

    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> resultConsumer,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context) {
        this(resultConsumer, searchPhaseController, context,

@@ -59,7 +58,7 @@ final class FetchSearchPhase extends SearchPhase {
            (finalResponse) -> sendResponsePhase(finalResponse, context)));
    }

    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> resultConsumer,
    FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> resultConsumer,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context, Function<SearchResponse, SearchPhase> nextPhaseFactory) {
        super("fetch");

@@ -98,35 +97,35 @@ final class FetchSearchPhase extends SearchPhase {
    private void innerRun() throws IOException {
        final int numShards = context.getNumShards();
        final boolean isScrollSearch = context.getRequest().scroll() != null;
        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults);
        List<SearchPhaseResult> phaseResults = queryResults.asList();
        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, phaseResults, context.getNumShards());
        String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null;
        List<AtomicArray.Entry<QuerySearchResultProvider>> queryResultsAsList = queryResults.asList();
        final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce();
        final boolean queryAndFetchOptimization = queryResults.length() == 1;
        final Runnable finishPhase = ()
            -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
                queryResults : fetchResults);
        if (queryAndFetchOptimization) {
            assert queryResults.get(0) == null || queryResults.get(0).fetchResult() != null;
            assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null;
            // query AND fetch optimization
            finishPhase.run();
        } else {
            final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, sortedShardDocs);
            if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return
                queryResultsAsList.stream()
                    .map(e -> e.value.queryResult())
                phaseResults.stream()
                    .map(e -> e.queryResult())
                    .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
                finishPhase.run();
            } else {
                final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
                    searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards)
                    : null;
                final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults::set,
                final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(r -> fetchResults.set(r.getShardIndex(), r),
                    docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
                    finishPhase, context);
                for (int i = 0; i < docIdsToLoad.length; i++) {
                    IntArrayList entry = docIdsToLoad[i];
                    QuerySearchResultProvider queryResult = queryResults.get(i);
                    SearchPhaseResult queryResult = queryResults.get(i);
                    if (entry == null) { // no results for this shard ID
                        if (queryResult != null) {
                            // if we got some hits from this shard we have to release the context there

@@ -137,10 +136,10 @@ final class FetchSearchPhase extends SearchPhase {
                            // in any case we count down this result since we don't talk to this shard anymore
                            counter.countDown();
                        } else {
                            Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId());
                            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().id(), i, entry,
                            Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId());
                            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().getRequestId(), i, entry,
                                lastEmittedDocPerShard);
                            executeFetch(i, queryResult.shardTarget(), counter, fetchSearchRequest, queryResult.queryResult(),
                            executeFetch(i, queryResult.getSearchShardTarget(), counter, fetchSearchRequest, queryResult.queryResult(),
                                connection);
                        }
                    }

@@ -159,10 +158,10 @@ final class FetchSearchPhase extends SearchPhase {
                      final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult,
                      final Transport.Connection connection) {
        context.getSearchTransport().sendExecuteFetch(connection, fetchSearchRequest, context.getTask(),
            new ActionListener<FetchSearchResult>() {
            new SearchActionListener<FetchSearchResult>(shardTarget, shardIndex) {
                @Override
                public void onResponse(FetchSearchResult result) {
                    counter.onResult(shardIndex, result, shardTarget);
                public void innerOnResponse(FetchSearchResult result) {
                    counter.onResult(result);
                }

                @Override

@@ -191,8 +190,8 @@ final class FetchSearchPhase extends SearchPhase {
        // and if it has at lease one hit that didn't make it to the global topDocs
        if (context.getRequest().scroll() == null && queryResult.hasHits()) {
            try {
                Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId());
                context.sendReleaseSearchContext(queryResult.id(), connection);
                Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId());
                context.sendReleaseSearchContext(queryResult.getRequestId(), connection);
            } catch (Exception e) {
                context.getLogger().trace("failed to release context", e);
            }

@@ -201,9 +200,9 @@ final class FetchSearchPhase extends SearchPhase {

    private void moveToNextPhase(SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,
                                 String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase,
                                 AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
                                 AtomicArray<? extends SearchPhaseResult> fetchResultsArr) {
        final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null,
            sortedDocs, reducedQueryPhase, fetchResultsArr);
            sortedDocs, reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get);
        context.executeNextPhase(this, nextPhaseFactory.apply(context.buildSearchResponse(internalResponse, scrollId)));
    }
@@ -21,7 +21,6 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;

@@ -144,10 +143,11 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
            onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
        } else {
            try {
                executePhaseOnShard(shardIt, shard, new ActionListener<FirstResult>() {
                executePhaseOnShard(shardIt, shard, new SearchActionListener<FirstResult>(new SearchShardTarget(shard.currentNodeId(),
                    shardIt.shardId()), shardIndex) {
                    @Override
                    public void onResponse(FirstResult result) {
                        onShardResult(shardIndex, shard.currentNodeId(), result, shardIt);
                    public void innerOnResponse(FirstResult result) {
                        onShardResult(result, shardIt);
                    }

                    @Override

@@ -164,9 +164,10 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
        }
    }

    private void onShardResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) {
        result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId()));
        onShardSuccess(shardIndex, result);
    private void onShardResult(FirstResult result, ShardIterator shardIt) {
        assert result.getShardIndex() != -1 : "shard index is not set";
        assert result.getSearchShardTarget() != null : "search shard target must not be null";
        onShardSuccess(result);
        // we need to increment successful ops first before we compare the exit condition otherwise if we
        // are fast we could concurrently update totalOps but then preempt one of the threads which can
        // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.

@@ -185,7 +186,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
    /**
     * Executed once all shard results have been received and processed
     * @see #onShardFailure(int, SearchShardTarget, Exception)
     * @see #onShardSuccess(int, SearchPhaseResult)
     * @see #onShardSuccess(SearchPhaseResult)
     */
    abstract void onPhaseDone(); // as a tribute to @kimchy aka. finishHim()

@@ -201,12 +202,10 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends

    /**
     * Executed once for every successful shard level request.
     * @param shardIndex the internal index for this shard. Each shard has an index / ordinal assigned that is used to reference
     *                   it's results
     * @param result the result returned form the shard
     *
     */
    abstract void onShardSuccess(int shardIndex, FirstResult result);
    abstract void onShardSuccess(FirstResult result);

    /**
     * Sends the request to the actual shard.

@@ -214,7 +213,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
     * @param shard the shard routing to send the request for
     * @param listener the listener to notify on response
     */
    protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener<FirstResult> listener);
    protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, SearchActionListener<FirstResult> listener);

    /**
     * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing

@@ -237,17 +236,16 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
     * A stream of all non-null (successful) shard results
     */
    final Stream<Result> getSuccessfulResults() {
        return results.asList().stream().map(e -> e.value);
        return results.asList().stream();
    }

    /**
     * Consumes a single shard result
     * @param shardIndex the shards index, this is a 0-based id that is used to establish a 1 to 1 mapping to the searched shards
     * @param result the shards result
     */
    void consumeResult(int shardIndex, Result result) {
        assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set";
        results.set(shardIndex, result);
    void consumeResult(Result result) {
        assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set";
        results.set(result.getShardIndex(), result);
    }

    /**
@@ -35,6 +35,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;

@@ -59,7 +60,6 @@ import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;

@@ -373,10 +373,19 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
                    // here we pass on the connection since we can only close it once the sendRequest returns otherwise
                    // due to the async nature (it will return before it's actually sent) this can cause the request to fail
                    // due to an already closed connection.
                    transportService.sendRequest(connection,
                        ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY,
                    ThreadPool threadPool = transportService.getThreadPool();
                    ThreadContext threadContext = threadPool.getThreadContext();
                    TransportService.ContextRestoreResponseHandler<ClusterStateResponse> responseHandler = new TransportService
                        .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false),
                        new SniffClusterStateResponseHandler(transportService, connection, listener, seedNodes,
                            cancellableThreads));
                    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                        // we stash any context here since this is an internal execution and should not leak any
                        // existing context information.
                        threadContext.markAsSystemContext();
                        transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY,
                            responseHandler);
                    }
                    success = true;
                } finally {
                    if (success == false) {

@@ -445,6 +454,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo

            @Override
            public void handleResponse(ClusterStateResponse response) {
                assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context";
                try {
                    try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes
                        // we have to close this connection before we notify listeners - this is mainly needed for test correctness

@@ -483,6 +493,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo

            @Override
            public void handleException(TransportException exp) {
                assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context";
                logger.warn((Supplier<?>)
                    () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias),
                    exp);

@@ -505,4 +516,9 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
        assert connectHandler.running.availablePermits() == 1;
        return true;
    }

    boolean isNodeConnected(final DiscoveryNode node) {
        return connectedNodes.contains(node);
    }

}
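The stash-and-restore dance in the hunk above (stash the caller's thread context, mark the stashed scope as a system context, send the internal cluster-state request, and let the context-restoring response handler put the caller's context back when the reply arrives) is easiest to see in isolation. Below is an illustrative, self-contained sketch of the same idea built on a plain ThreadLocal; the names ScopedContext and stash are hypothetical and this is not the Elasticsearch ThreadContext API.

import java.util.HashMap;
import java.util.Map;

// Sketch of the stash-and-restore pattern: the caller's request context is
// replaced with a fresh "system" context for an internal call and restored
// afterwards, so no caller headers leak into the internal request.
final class ScopedContext {
    private static final ThreadLocal<Map<String, String>> CURRENT =
        ThreadLocal.withInitial(HashMap::new);

    /** Swaps in an empty system context and returns a restorer for the previous one. */
    static AutoCloseable stash() {
        Map<String, String> previous = CURRENT.get();
        Map<String, String> system = new HashMap<>();
        system.put("system", "true");        // mark the stashed scope as a system context
        CURRENT.set(system);
        return () -> CURRENT.set(previous);  // closing restores the caller's context
    }

    static Map<String, String> current() {
        return CURRENT.get();
    }

    public static void main(String[] args) throws Exception {
        CURRENT.get().put("user", "alice");
        try (AutoCloseable ignored = stash()) {
            // the internal call runs with the system context, not the caller's headers
            System.out.println("inside stash: " + current());
        }
        System.out.println("after restore: " + current()); // {user=alice}
    }
}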
@@ -26,8 +26,10 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;

@@ -136,7 +138,7 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
            // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for
            // cross cluster search
            String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
            nodePredicate = nodePredicate.and((node) -> Boolean.getBoolean(node.getAttributes().getOrDefault(attribute, "false")));
            nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
        }
        remoteClusters.putAll(this.remoteClusters);
        for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {

@@ -185,6 +187,10 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
        return remoteClusters.isEmpty() == false;
    }

    boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) {
        return remoteClusters.get(remoteCluster).isNodeConnected(node);
    }

    /**
     * Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All
     * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under

@@ -326,13 +332,20 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
    }

    void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
        updateRemoteClusters(Collections.singletonMap(clusterAlias, addresses.stream().map(address -> {
            TransportAddress transportAddress = new TransportAddress(address);
            return new DiscoveryNode(clusterAlias + "#" + transportAddress.toString(),
                transportAddress,
                Version.CURRENT.minimumCompatibilityVersion());
        }).collect(Collectors.toList())),
            ActionListener.wrap((x) -> {}, (x) -> {}) );
        updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {}));
    }

    void updateRemoteCluster(
            final String clusterAlias,
            final List<InetSocketAddress> addresses,
            final ActionListener<Void> connectionListener) {
        final List<DiscoveryNode> nodes = addresses.stream().map(address -> {
            final TransportAddress transportAddress = new TransportAddress(address);
            final String id = clusterAlias + "#" + transportAddress.toString();
            final Version version = Version.CURRENT.minimumCompatibilityVersion();
            return new DiscoveryNode(id, transportAddress, version);
        }).collect(Collectors.toList());
        updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener);
    }

    static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
@@ -0,0 +1,53 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;

/**
 * A base action listener that ensures shard target and shard index is set on all responses
 * received by this listener.
 */
abstract class SearchActionListener<T extends SearchPhaseResult> implements ActionListener<T> {
    private final int requestIndex;
    private final SearchShardTarget searchShardTarget;

    protected SearchActionListener(SearchShardTarget searchShardTarget,
                                   int shardIndex) {
        assert shardIndex >= 0 : "shard index must be positive";
        this.searchShardTarget = searchShardTarget;
        this.requestIndex = shardIndex;
    }

    @Override
    public final void onResponse(T response) {
        response.setShardIndex(requestIndex);
        setSearchShardTarget(response);
        innerOnResponse(response);
    }

    protected void setSearchShardTarget(T response) { // some impls need to override this
        response.setSearchShardTarget(searchShardTarget);
    }

    protected abstract void innerOnResponse(T response);

}
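SearchActionListener centralizes the bookkeeping that used to be scattered across the per-phase listeners: the final onResponse stamps the shard index and shard target on every result before the subclass's innerOnResponse runs. A simplified, self-contained sketch of that pattern (hypothetical stand-in types, not the Elasticsearch classes) might look like this:

// Illustrative only: Listener and ShardResult stand in for ActionListener and
// SearchPhaseResult so the stamping pattern can be shown without ES dependencies.
public class ListenerSketch {

    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    static class ShardResult {
        int shardIndex = -1;
        String shardTarget;
    }

    abstract static class StampingListener<T extends ShardResult> implements Listener<T> {
        private final int shardIndex;
        private final String shardTarget;

        StampingListener(String shardTarget, int shardIndex) {
            this.shardTarget = shardTarget;
            this.shardIndex = shardIndex;
        }

        @Override
        public final void onResponse(T response) {
            response.shardIndex = shardIndex;   // always set before the subclass sees it
            response.shardTarget = shardTarget;
            innerOnResponse(response);
        }

        protected abstract void innerOnResponse(T response);
    }

    public static void main(String[] args) {
        StampingListener<ShardResult> listener = new StampingListener<ShardResult>("node-1", 3) {
            @Override
            protected void innerOnResponse(ShardResult r) {
                System.out.println("shard " + r.shardIndex + " on " + r.shardTarget);
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        };
        listener.onResponse(new ShardResult()); // prints "shard 3 on node-1"
    }
}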
@@ -72,7 +72,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
    protected void executePhaseOnShard(
            final ShardIterator shardIt,
            final ShardRouting shard,
            final ActionListener<DfsSearchResult> listener) {
            final SearchActionListener<DfsSearchResult> listener) {
        getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()),
            buildShardSearchRequest(shardIt, shard), getTask(), listener);
    }
@@ -93,8 +93,8 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {

    /**
     * Releases a search context with the given context ID on the node the given connection is connected to.
     * @see org.elasticsearch.search.query.QuerySearchResult#id()
     * @see org.elasticsearch.search.fetch.FetchSearchResult#id()
     * @see org.elasticsearch.search.query.QuerySearchResult#getRequestId()
     * @see org.elasticsearch.search.fetch.FetchSearchResult#getRequestId()
     *
     */
    default void sendReleaseSearchContext(long contextId, Transport.Connection connection) {
@ -36,10 +36,10 @@ import org.elasticsearch.common.component.AbstractComponent;
|
|||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchHits;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregations;
|
||||
|
@ -52,7 +52,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
|
|||
import org.elasticsearch.search.profile.ProfileShardResult;
|
||||
import org.elasticsearch.search.profile.SearchProfileShardResults;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.search.suggest.Suggest;
|
||||
import org.elasticsearch.search.suggest.Suggest.Suggestion;
|
||||
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
|
||||
|
@ -61,14 +60,16 @@ import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
|
|||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
public class SearchPhaseController extends AbstractComponent {
|
||||
public final class SearchPhaseController extends AbstractComponent {
|
||||
|
||||
private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
|
||||
|
||||
|
@ -81,13 +82,13 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
this.scriptService = scriptService;
|
||||
}
|
||||
|
||||
public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
|
||||
public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
|
||||
ObjectObjectHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
|
||||
ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
|
||||
long aggMaxDoc = 0;
|
||||
for (AtomicArray.Entry<DfsSearchResult> lEntry : results.asList()) {
|
||||
final Term[] terms = lEntry.value.terms();
|
||||
final TermStatistics[] stats = lEntry.value.termStatistics();
|
||||
for (DfsSearchResult lEntry : results) {
|
||||
final Term[] terms = lEntry.terms();
|
||||
final TermStatistics[] stats = lEntry.termStatistics();
|
||||
assert terms.length == stats.length;
|
||||
for (int i = 0; i < terms.length; i++) {
|
||||
assert terms[i] != null;
|
||||
|
@ -105,9 +106,9 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
|
||||
}
|
||||
|
||||
assert !lEntry.value.fieldStatistics().containsKey(null);
|
||||
final Object[] keys = lEntry.value.fieldStatistics().keys;
|
||||
final Object[] values = lEntry.value.fieldStatistics().values;
|
||||
assert !lEntry.fieldStatistics().containsKey(null);
|
||||
final Object[] keys = lEntry.fieldStatistics().keys;
|
||||
final Object[] values = lEntry.fieldStatistics().values;
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
if (keys[i] != null) {
|
||||
String key = (String) keys[i];
|
||||
|
@ -127,7 +128,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
}
|
||||
aggMaxDoc += lEntry.value.maxDoc();
|
||||
aggMaxDoc += lEntry.maxDoc();
|
||||
}
|
||||
return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
|
||||
}
|
||||
|
@ -146,10 +147,9 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
*
|
||||
* @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
|
||||
* Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
|
||||
* @param resultsArr Shard result holder
|
||||
* @param results Shard result holder
|
||||
*/
|
||||
public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray<? extends QuerySearchResultProvider> resultsArr) throws IOException {
|
||||
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
|
||||
public ScoreDoc[] sortDocs(boolean ignoreFrom, Collection<? extends SearchPhaseResult> results, int numShards) throws IOException {
|
||||
if (results.isEmpty()) {
|
||||
return EMPTY_DOCS;
|
||||
}
|
||||
|
@ -159,25 +159,25 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
int shardIndex = -1;
|
||||
if (results.size() == 1) {
|
||||
canOptimize = true;
|
||||
result = results.get(0).value.queryResult();
|
||||
shardIndex = results.get(0).index;
|
||||
result = results.stream().findFirst().get().queryResult();
|
||||
shardIndex = result.getShardIndex();
|
||||
} else {
|
||||
boolean hasResult = false;
|
||||
QuerySearchResult resultToOptimize = null;
|
||||
// lets see if we only got hits from a single shard, if so, we can optimize...
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
|
||||
if (entry.value.queryResult().hasHits()) {
|
||||
for (SearchPhaseResult entry : results) {
|
||||
if (entry.queryResult().hasHits()) {
|
||||
if (hasResult) { // we already have one, can't really optimize
|
||||
canOptimize = false;
|
||||
break;
|
||||
}
|
||||
canOptimize = true;
|
||||
hasResult = true;
|
||||
resultToOptimize = entry.value.queryResult();
|
||||
shardIndex = entry.index;
|
||||
resultToOptimize = entry.queryResult();
|
||||
shardIndex = resultToOptimize.getShardIndex();
|
||||
}
|
||||
}
|
||||
result = canOptimize ? resultToOptimize : results.get(0).value.queryResult();
|
||||
result = canOptimize ? resultToOptimize : results.stream().findFirst().get().queryResult();
|
||||
assert result != null;
|
||||
}
|
||||
if (canOptimize) {
|
||||
|
@ -228,22 +228,21 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
final int from = ignoreFrom ? 0 : result.queryResult().from();
|
||||
|
||||
final TopDocs mergedTopDocs;
|
||||
final int numShards = resultsArr.length();
|
||||
if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
|
||||
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
|
||||
final Sort sort = new Sort(firstTopDocs.fields);
|
||||
final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
|
||||
fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
|
||||
sort.getSort(), new Object[0], Float.NaN));
|
||||
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
|
||||
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, true);
|
||||
} else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
|
||||
TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
|
||||
final Sort sort = new Sort(firstTopDocs.fields);
|
||||
final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
|
||||
final TopFieldDocs[] shardTopDocs = new TopFieldDocs[numShards];
|
||||
fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
|
||||
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
|
||||
} else {
|
||||
final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
|
||||
final TopDocs[] shardTopDocs = new TopDocs[numShards];
|
||||
fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
|
||||
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
|
||||
}
|
||||
|
@ -251,11 +250,11 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
|
||||
final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
|
||||
// group suggestions and assign shard index
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
|
||||
Suggest shardSuggest = sortedResult.value.queryResult().suggest();
|
||||
for (SearchPhaseResult sortedResult : results) {
|
||||
Suggest shardSuggest = sortedResult.queryResult().suggest();
|
||||
if (shardSuggest != null) {
|
||||
for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
|
||||
suggestion.setShardIndex(sortedResult.index);
|
||||
suggestion.setShardIndex(sortedResult.getShardIndex());
|
||||
List<Suggestion<CompletionSuggestion.Entry>> suggestions =
|
||||
groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
|
||||
suggestions.add(suggestion);
|
||||
|
@ -286,17 +285,16 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
|
||||
static <T extends TopDocs> void fillTopDocs(T[] shardTopDocs,
|
||||
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results,
|
||||
T empytTopDocs) {
|
||||
Collection<? extends SearchPhaseResult> results, T empytTopDocs) {
|
||||
if (results.size() != shardTopDocs.length) {
|
||||
// TopDocs#merge can't deal with null shard TopDocs
|
||||
Arrays.fill(shardTopDocs, empytTopDocs);
|
||||
}
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> resultProvider : results) {
|
||||
final T topDocs = (T) resultProvider.value.queryResult().topDocs();
|
||||
for (SearchPhaseResult resultProvider : results) {
|
||||
final T topDocs = (T) resultProvider.queryResult().topDocs();
|
||||
assert topDocs != null : "top docs must not be null in a valid result";
|
||||
// the 'index' field is the position in the resultsArr atomic array
|
||||
shardTopDocs[resultProvider.index] = topDocs;
|
||||
shardTopDocs[resultProvider.getShardIndex()] = topDocs;
|
||||
}
|
||||
}
|
||||
public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
|
||||
|
@ -340,12 +338,11 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
*/
|
||||
public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs,
|
||||
ReducedQueryPhase reducedQueryPhase,
|
||||
AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
|
||||
Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) {
|
||||
if (reducedQueryPhase.isEmpty()) {
|
||||
return InternalSearchResponse.empty();
|
||||
}
|
||||
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> fetchResults = fetchResultsArr.asList();
|
||||
SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResultsArr);
|
||||
SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResults, resultsLookup);
|
||||
if (reducedQueryPhase.suggest != null) {
|
||||
if (!fetchResults.isEmpty()) {
|
||||
int currentOffset = hits.getHits().length;
|
||||
|
@ -353,7 +350,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
final List<CompletionSuggestion.Entry.Option> suggestionOptions = suggestion.getOptions();
|
||||
for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) {
|
||||
ScoreDoc shardDoc = sortedDocs[scoreDocIndex];
|
||||
QuerySearchResultProvider searchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
|
||||
SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
|
||||
if (searchResultProvider == null) {
|
||||
continue;
|
||||
}
|
||||
|
@ -364,7 +361,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
CompletionSuggestion.Entry.Option suggestOption =
|
||||
suggestionOptions.get(scoreDocIndex - currentOffset);
|
||||
hit.score(shardDoc.score);
|
||||
hit.shard(fetchResult.shardTarget());
|
||||
hit.shard(fetchResult.getSearchShardTarget());
|
||||
suggestOption.setHit(hit);
|
||||
}
|
||||
}
|
||||
|
@ -377,8 +374,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
|
||||
private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, ScoreDoc[] sortedDocs,
|
||||
AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
|
||||
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> fetchResults = fetchResultsArr.asList();
|
||||
Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) {
|
||||
boolean sorted = false;
|
||||
int sortScoreIndex = -1;
|
||||
if (reducedQueryPhase.oneResult.topDocs() instanceof TopFieldDocs) {
|
||||
|
@ -396,8 +392,8 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
// clean the fetch counter
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : fetchResults) {
|
||||
entry.value.fetchResult().initCounter();
|
||||
for (SearchPhaseResult entry : fetchResults) {
|
||||
entry.fetchResult().initCounter();
|
||||
}
|
||||
int from = ignoreFrom ? 0 : reducedQueryPhase.oneResult.queryResult().from();
|
||||
int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.oneResult.size());
|
||||
|
@ -408,7 +404,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
if (!fetchResults.isEmpty()) {
|
||||
for (int i = 0; i < numSearchHits; i++) {
|
||||
ScoreDoc shardDoc = sortedDocs[i];
|
||||
QuerySearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
|
||||
SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
|
||||
if (fetchResultProvider == null) {
|
||||
continue;
|
||||
}
|
||||
|
@ -417,7 +413,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
if (index < fetchResult.hits().internalHits().length) {
|
||||
SearchHit searchHit = fetchResult.hits().internalHits()[index];
|
||||
searchHit.score(shardDoc.score);
|
||||
searchHit.shard(fetchResult.shardTarget());
|
||||
searchHit.shard(fetchResult.getSearchShardTarget());
|
||||
if (sorted) {
|
||||
FieldDoc fieldDoc = (FieldDoc) shardDoc;
|
||||
searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.oneResult.sortValueFormats());
|
||||
|
@ -437,7 +433,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
* Reduces the given query results and consumes all aggregations and profile results.
|
||||
* @param queryResults a list of non-null query shard results
|
||||
*/
|
||||
public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
|
||||
public ReducedQueryPhase reducedQueryPhase(List<? extends SearchPhaseResult> queryResults) {
|
||||
return reducedQueryPhase(queryResults, null, 0);
|
||||
}
|
||||
|
||||
|
@ -450,7 +446,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
* @see QuerySearchResult#consumeAggs()
|
||||
* @see QuerySearchResult#consumeProfileResult()
|
||||
*/
|
||||
private ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults,
|
||||
private ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults,
|
||||
List<InternalAggregations> bufferdAggs, int numReducePhases) {
|
||||
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
|
||||
numReducePhases++; // increment for this phase
|
||||
|
@ -463,7 +459,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null,
|
||||
numReducePhases);
|
||||
}
|
||||
final QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
|
||||
final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult();
|
||||
final boolean hasSuggest = firstResult.suggest() != null;
|
||||
final boolean hasProfileResults = firstResult.hasProfileResults();
|
||||
final boolean consumeAggs;
|
||||
|
@ -487,8 +483,8 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
|
||||
final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size())
|
||||
: Collections.emptyMap();
|
||||
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
|
||||
QuerySearchResult result = entry.value.queryResult();
|
||||
for (SearchPhaseResult entry : queryResults) {
|
||||
QuerySearchResult result = entry.queryResult();
|
||||
if (result.searchTimedOut()) {
|
||||
timedOut = true;
|
||||
}
|
||||
|
@ -515,7 +511,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
aggregationsList.add((InternalAggregations) result.consumeAggs());
|
||||
}
|
||||
if (hasProfileResults) {
|
||||
String key = result.shardTarget().toString();
|
||||
String key = result.getSearchShardTarget().toString();
|
||||
profileResults.put(key, result.consumeProfileResult());
|
||||
}
|
||||
}
|
||||
|
@ -601,7 +597,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
|
||||
/**
|
||||
* Creates a new search response from the given merged hits.
|
||||
* @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray)
|
||||
* @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, Collection, IntFunction)
|
||||
*/
|
||||
public InternalSearchResponse buildResponse(SearchHits hits) {
|
||||
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases);
|
||||
|
@ -622,7 +618,7 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
* iff the buffer is exhausted.
|
||||
*/
|
||||
static final class QueryPhaseResultConsumer
|
||||
extends InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> {
|
||||
extends InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> {
|
||||
private final InternalAggregations[] buffer;
|
||||
private int index;
|
||||
private final SearchPhaseController controller;
|
||||
|
@ -649,8 +645,8 @@ public class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void consumeResult(int shardIndex, QuerySearchResultProvider result) {
|
||||
super.consumeResult(shardIndex, result);
|
||||
public void consumeResult(SearchPhaseResult result) {
|
||||
super.consumeResult(result);
|
||||
QuerySearchResult queryResult = result.queryResult();
|
||||
assert queryResult.hasAggs() : "this collector should only be used if aggs are requested";
|
||||
consumeInternal(queryResult);
|
||||
|
@ -691,7 +687,7 @@ public class SearchPhaseController extends AbstractComponent {
    /**
     * Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
     */
    InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> newSearchPhaseResults(SearchRequest request, int numShards) {
    InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(SearchRequest request, int numShards) {
        SearchSourceBuilder source = request.source();
        if (source != null && source.aggregations() != null) {
            if (request.getBatchedReduceSize() < numShards) {
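The branch above routes aggregation-bearing requests to the incremental QueryPhaseResultConsumer once the configured batched reduce size is smaller than the shard count. A minimal sketch of opting into incremental reduction from a client, assuming the SearchRequest#setBatchedReduceSize setter consulted above; index and field names are made up:

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    final class BatchedReduceSketch {
        // Sketch only: reduce aggregation results in batches of 32 shard responses instead of
        // buffering every shard response until the final reduce.
        static SearchRequest batchedReduceRequest() {
            SearchSourceBuilder source = new SearchSourceBuilder()
                    .size(0)
                    .aggregation(AggregationBuilders.terms("by_host").field("host"));
            SearchRequest request = new SearchRequest("logs-*").source(source);
            request.setBatchedReduceSize(32); // below the shard count, so the consumer reduces incrementally
            return request;
        }
    }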
|
|
|
@ -24,8 +24,8 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
||||
import java.util.Map;
|
||||
|
@ -33,7 +33,7 @@ import java.util.concurrent.Executor;
|
|||
import java.util.function.Function;
|
||||
|
||||
final class SearchQueryThenFetchAsyncAction
|
||||
extends AbstractSearchAsyncAction<QuerySearchResultProvider> {
|
||||
extends AbstractSearchAsyncAction<SearchPhaseResult> {
|
||||
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
|
@ -69,11 +69,10 @@ final class SearchQueryThenFetchAsyncAction
|
|||
this.searchPhaseController = searchPhaseController;
|
||||
}
|
||||
|
||||
|
||||
protected void executePhaseOnShard(
|
||||
final ShardIterator shardIt,
|
||||
final ShardRouting shard,
|
||||
final ActionListener<QuerySearchResultProvider> listener) {
|
||||
final SearchActionListener<SearchPhaseResult> listener) {
|
||||
getSearchTransport().sendExecuteQuery(
|
||||
getConnection(shard.currentNodeId()),
|
||||
buildShardSearchRequest(shardIt, shard),
|
||||
|
@ -83,9 +82,8 @@ final class SearchQueryThenFetchAsyncAction
|
|||
|
||||
@Override
|
||||
protected SearchPhase getNextPhase(
|
||||
final SearchPhaseResults<QuerySearchResultProvider> results,
|
||||
final SearchPhaseResults<SearchPhaseResult> results,
|
||||
final SearchPhaseContext context) {
|
||||
return new FetchSearchPhase(results, searchPhaseController, context);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -32,13 +32,14 @@ import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
|||
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
||||
final class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final Logger logger;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
|
@ -70,21 +71,17 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
|||
this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
private ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
List<ShardSearchFailure> failures = shardFailures.asList();
|
||||
return failures.toArray(new ShardSearchFailure[failures.size()]);
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
private void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
@ -130,15 +127,20 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

    void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchTransportService.sendExecuteFetch(node, internalRequest, task, new ActionListener<ScrollQueryFetchSearchResult>() {
        searchTransportService.sendExecuteScrollFetch(node, internalRequest, task,
            new SearchActionListener<ScrollQueryFetchSearchResult>(null, shardIndex) {
                @Override
                public void onResponse(ScrollQueryFetchSearchResult result) {
                    queryFetchResults.set(shardIndex, result.result());
                protected void setSearchShardTarget(ScrollQueryFetchSearchResult response) {
                    // don't do this - it's part of the response...
                    assert response.getSearchShardTarget() != null : "search shard target must not be null";
                }
                @Override
                protected void innerOnResponse(ScrollQueryFetchSearchResult response) {
                    queryFetchResults.set(response.getShardIndex(), response.result());
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Exception t) {
                    onPhaseFailure(t, searchId, shardIndex);
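Both scroll actions now funnel responses through SearchActionListener rather than a bare ActionListener. The sketch below is not the real class, only an outline of the contract the call sites above appear to rely on: the listener carries the shard index (and optionally the shard target), stamps them onto the result, and then delegates to innerOnResponse.

    // Hypothetical outline of the assumed listener contract; everything except the overridden
    // method names is illustrative.
    abstract class SearchActionListenerSketch<T extends SearchPhaseResult> implements ActionListener<T> {
        private final SearchShardTarget target;
        private final int shardIndex;

        SearchActionListenerSketch(SearchShardTarget target, int shardIndex) {
            this.target = target;
            this.shardIndex = shardIndex;
        }

        @Override
        public final void onResponse(T response) {
            response.setShardIndex(shardIndex);   // every result learns its slot in the AtomicArray
            setSearchShardTarget(response);
            innerOnResponse(response);
        }

        protected void setSearchShardTarget(T response) {
            if (target != null) {
                response.setSearchShardTarget(target); // scroll listeners override this: the response already has one
            }
        }

        protected abstract void innerOnResponse(T response);
    }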
@ -170,9 +172,10 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
|||
}
|
||||
|
||||
private void innerFinishHim() throws Exception {
|
||||
ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults);
|
||||
List<QueryFetchSearchResult> queryFetchSearchResults = queryFetchResults.asList();
|
||||
ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults.asList(), queryFetchResults.length());
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs,
|
||||
searchPhaseController.reducedQueryPhase(queryFetchResults.asList()), queryFetchResults);
|
||||
searchPhaseController.reducedQueryPhase(queryFetchSearchResults), queryFetchSearchResults, queryFetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchRequest;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
|
@ -41,7 +42,7 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
||||
final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final Logger logger;
|
||||
private final SearchTask task;
|
||||
|
@ -73,21 +74,17 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
protected final ShardSearchFailure[] buildShardFailures() {
|
||||
private ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
|
||||
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
|
||||
for (int i = 0; i < failures.length; i++) {
|
||||
failures[i] = entries.get(i).value;
|
||||
}
|
||||
return failures;
|
||||
List<ShardSearchFailure> failures = shardFailures.asList();
|
||||
return failures.toArray(new ShardSearchFailure[failures.size()]);
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
private void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
@ -99,8 +96,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);
        final CountDown counter = new CountDown(scrollId.getContext().length);
        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];

@ -112,7 +108,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
                logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
            }
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
            if (counter.countDown()) {
                try {
                    executeFetchPhase();
                } catch (Exception e) {
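Replacing the raw AtomicInteger with org.elasticsearch.common.util.concurrent.CountDown makes the termination condition explicit: countDown() returns true exactly once, for the call that releases the last outstanding operation. A minimal sketch of the behavior the code above relies on:

    import org.elasticsearch.common.util.concurrent.CountDown;

    final class CountDownSketch {
        public static void main(String[] args) {
            CountDown counter = new CountDown(3);
            System.out.println(counter.countDown());     // false
            System.out.println(counter.countDown());     // false
            System.out.println(counter.countDown());     // true: this caller would run executeFetchPhase()
            System.out.println(counter.isCountedDown()); // true
            System.out.println(counter.countDown());     // false: already counted down, later calls are no-ops
        }
    }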
@ -124,13 +120,21 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
}
|
||||
}
|
||||
|
||||
private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
|
||||
private void executeQueryPhase(final int shardIndex, final CountDown counter, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchTransportService.sendExecuteQuery(node, internalRequest, task, new ActionListener<ScrollQuerySearchResult>() {
|
||||
searchTransportService.sendExecuteScrollQuery(node, internalRequest, task,
|
||||
new SearchActionListener<ScrollQuerySearchResult>(null, shardIndex) {
|
||||
|
||||
@Override
|
||||
public void onResponse(ScrollQuerySearchResult result) {
|
||||
queryResults.set(shardIndex, result.queryResult());
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
protected void setSearchShardTarget(ScrollQuerySearchResult response) {
|
||||
// don't do this - it's part of the response...
|
||||
assert response.getSearchShardTarget() != null : "search shard target must not be null";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerOnResponse(ScrollQuerySearchResult result) {
|
||||
queryResults.setOnce(result.getShardIndex(), result.queryResult());
|
||||
if (counter.countDown()) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Exception e) {
|
||||
|
@ -146,13 +150,13 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
});
|
||||
}
|
||||
|
||||
void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) {
|
||||
void onQueryPhaseFailure(final int shardIndex, final CountDown counter, final long searchId, Exception failure) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(failure));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (counter.countDown()) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures()));
|
||||
} else {
|
||||
|
@ -167,7 +171,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
}
|
||||
|
||||
private void executeFetchPhase() throws Exception {
|
||||
sortedShardDocs = searchPhaseController.sortDocs(true, queryResults);
|
||||
sortedShardDocs = searchPhaseController.sortDocs(true, queryResults.asList(), queryResults.length());
|
||||
if (sortedShardDocs.length == 0) {
|
||||
finishHim(searchPhaseController.reducedQueryPhase(queryResults.asList()));
|
||||
return;
|
||||
|
@ -177,21 +181,21 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList());
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs,
|
||||
queryResults.length());
|
||||
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.length);
|
||||
final CountDown counter = new CountDown(docIdsToLoad.length);
|
||||
for (int i = 0; i < docIdsToLoad.length; i++) {
|
||||
final int index = i;
|
||||
final IntArrayList docIds = docIdsToLoad[index];
|
||||
if (docIds != null) {
|
||||
final QuerySearchResult querySearchResult = queryResults.get(index);
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[index];
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
|
||||
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().getNodeId());
|
||||
searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener<FetchSearchResult>() {
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.getRequestId(), docIds, lastEmittedDoc);
|
||||
DiscoveryNode node = nodes.get(querySearchResult.getSearchShardTarget().getNodeId());
|
||||
searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task,
|
||||
new SearchActionListener<FetchSearchResult>(querySearchResult.getSearchShardTarget(), index) {
|
||||
@Override
|
||||
public void onResponse(FetchSearchResult result) {
|
||||
result.shardTarget(querySearchResult.shardTarget());
|
||||
fetchResults.set(index, result);
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
protected void innerOnResponse(FetchSearchResult response) {
|
||||
fetchResults.setOnce(response.getShardIndex(), response);
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
}
|
||||
}
|
||||
|
@ -202,14 +206,14 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
logger.debug("Failed to execute fetch phase", t);
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// the counter is set to the total size of docIdsToLoad which can have null values so we have to count them down too
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
}
|
||||
}
|
||||
|
@ -218,7 +222,8 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
|
||||
private void finishHim(SearchPhaseController.ReducedQueryPhase queryPhase) {
|
||||
try {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase, fetchResults);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase,
|
||||
fetchResults.asList(), fetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
|
@ -42,7 +43,6 @@ import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
|||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchRequest;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.QuerySearchResultProvider;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -118,17 +118,17 @@ public class SearchTransportService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
|
||||
final ActionListener<DfsSearchResult> listener) {
|
||||
final SearchActionListener<DfsSearchResult> listener) {
|
||||
transportService.sendChildRequest(connection, DFS_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, DfsSearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
|
||||
final ActionListener<QuerySearchResultProvider> listener) {
|
||||
final SearchActionListener<SearchPhaseResult> listener) {
|
||||
// we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request
|
||||
// this used to be the QUERY_AND_FETCH which doesn't exists anymore.
|
||||
final boolean fetchDocuments = request.numberOfShards() == 1;
|
||||
Supplier<QuerySearchResultProvider> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
|
||||
Supplier<SearchPhaseResult> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
|
||||
if (connection.getVersion().onOrBefore(Version.V_5_3_0_UNRELEASED) && fetchDocuments) {
|
||||
// TODO this BWC layer can be removed once this is back-ported to 5.3
|
||||
transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task,
|
||||
|
@ -140,35 +140,35 @@ public class SearchTransportService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task,
|
||||
final ActionListener<QuerySearchResult> listener) {
|
||||
final SearchActionListener<QuerySearchResult> listener) {
|
||||
transportService.sendChildRequest(connection, QUERY_ID_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, QuerySearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final ActionListener<ScrollQuerySearchResult> listener) {
|
||||
public void sendExecuteScrollQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final SearchActionListener<ScrollQuerySearchResult> listener) {
|
||||
transportService.sendChildRequest(transportService.getConnection(node), QUERY_SCROLL_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final ActionListener<ScrollQueryFetchSearchResult> listener) {
|
||||
public void sendExecuteScrollFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final SearchActionListener<ScrollQueryFetchSearchResult> listener) {
|
||||
transportService.sendChildRequest(transportService.getConnection(node), QUERY_FETCH_SCROLL_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteFetch(Transport.Connection connection, final ShardFetchSearchRequest request, SearchTask task,
|
||||
final ActionListener<FetchSearchResult> listener) {
|
||||
final SearchActionListener<FetchSearchResult> listener) {
|
||||
sendExecuteFetch(connection, FETCH_ID_ACTION_NAME, request, task, listener);
|
||||
}
|
||||
|
||||
public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, SearchTask task,
|
||||
final ActionListener<FetchSearchResult> listener) {
|
||||
final SearchActionListener<FetchSearchResult> listener) {
|
||||
sendExecuteFetch(transportService.getConnection(node), FETCH_ID_SCROLL_ACTION_NAME, request, task, listener);
|
||||
}
|
||||
|
||||
private void sendExecuteFetch(Transport.Connection connection, String action, final ShardFetchRequest request, SearchTask task,
|
||||
final ActionListener<FetchSearchResult> listener) {
|
||||
final SearchActionListener<FetchSearchResult> listener) {
|
||||
transportService.sendChildRequest(connection, action, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, FetchSearchResult::new));
|
||||
}
|
||||
|
@ -327,7 +327,7 @@ public class SearchTransportService extends AbstractLifecycleComponent {
|
|||
new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
|
||||
@Override
|
||||
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
|
||||
QuerySearchResultProvider result = searchService.executeQueryPhase(request, (SearchTask)task);
|
||||
SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task);
|
||||
channel.sendResponse(result);
|
||||
}
|
||||
});
|
||||
|
@ -361,7 +361,7 @@ public class SearchTransportService extends AbstractLifecycleComponent {
|
|||
@Override
|
||||
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
|
||||
assert request.numberOfShards() == 1 : "expected single shard request but got: " + request.numberOfShards();
|
||||
QuerySearchResultProvider result = searchService.executeQueryPhase(request, (SearchTask)task);
|
||||
SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task);
|
||||
channel.sendResponse(result);
|
||||
}
|
||||
});
|
||||
|
|
|
@ -38,10 +38,9 @@ final class TransportSearchHelper {
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
            out.writeVInt(searchPhaseResults.asList().size());
            for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
                SearchPhaseResult searchPhaseResult = entry.value;
                out.writeLong(searchPhaseResult.id());
                out.writeString(searchPhaseResult.shardTarget().getNodeId());
            for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) {
                out.writeLong(searchPhaseResult.getRequestId());
                out.writeString(searchPhaseResult.getSearchShardTarget().getNodeId());
            }
            byte[] bytes = new byte[(int) out.getFilePointer()];
            out.writeTo(bytes, 0);
|
|
|
@ -22,16 +22,12 @@ package org.elasticsearch.action.support.replication;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkShardRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.support.WriteResponse;
|
||||
import org.elasticsearch.client.transport.NoNodeAvailableException;
|
||||
import org.elasticsearch.cluster.action.shard.ShardStateAction;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
@ -46,7 +42,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.apache.logging.log4j.core.pattern.ConverterKeys;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -302,15 +297,21 @@ public abstract class TransportWriteAction<
|
|||
}
|
||||
|
||||
void run() {
|
||||
// we either respond immediately ie. if we we don't fsync per request or wait for refresh
|
||||
// OR we got an pass async operations on and wait for them to return to respond.
|
||||
indexShard.maybeFlush();
|
||||
maybeFinish(); // decrement the pendingOpts by one, if there is nothing else to do we just respond with success.
|
||||
/*
|
||||
* We either respond immediately (i.e., if we do not fsync per request or wait for
|
||||
* refresh), or we there are past async operations and we wait for them to return to
|
||||
* respond.
|
||||
*/
|
||||
indexShard.afterWriteOperation();
|
||||
// decrement pending by one, if there is nothing else to do we just respond with success
|
||||
maybeFinish();
|
||||
if (waitUntilRefresh) {
|
||||
assert pendingOps.get() > 0;
|
||||
indexShard.addRefreshListener(location, forcedRefresh -> {
|
||||
if (forcedRefresh) {
|
||||
logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
|
||||
logger.warn(
|
||||
"block until refresh ran out of slots and forced a refresh: [{}]",
|
||||
request);
|
||||
}
|
||||
refreshed.set(forcedRefresh);
|
||||
maybeFinish();
|
||||
|
|
|
@ -135,14 +135,14 @@ public abstract class TransportTasksAction<
        }
        List<TaskResponse> results = new ArrayList<>();
        List<TaskOperationFailure> exceptions = new ArrayList<>();
        for (AtomicArray.Entry<Tuple<TaskResponse, Exception>> response : responses.asList()) {
            if (response.value.v1() == null) {
                assert response.value.v2() != null;
        for (Tuple<TaskResponse, Exception> response : responses.asList()) {
            if (response.v1() == null) {
                assert response.v2() != null;
                exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(),
                    response.value.v2()));
                    response.v2()));
            } else {
                assert response.value.v2() == null;
                results.add(response.value.v1());
                assert response.v2() == null;
                results.add(response.v1());
            }
        }
        listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions));
|
|
|
@ -122,6 +122,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
.setRefreshPolicy(request.getRefreshPolicy())
|
||||
.routing(request.routing())
|
||||
.parent(request.parent())
|
||||
.timeout(request.timeout())
|
||||
.waitForActiveShards(request.waitForActiveShards());
|
||||
if (request.versionType() != VersionType.INTERNAL) {
|
||||
// in all but the internal versioning mode, we want to create the new document using the given version.
|
||||
|
@ -188,12 +189,14 @@ public class UpdateHelper extends AbstractComponent {
|
|||
.source(updatedSourceAsMap, updateSourceContentType)
|
||||
.version(updateVersion).versionType(request.versionType())
|
||||
.waitForActiveShards(request.waitForActiveShards())
|
||||
.timeout(request.timeout())
|
||||
.setRefreshPolicy(request.getRefreshPolicy());
|
||||
return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType);
|
||||
} else if ("delete".equals(operation)) {
|
||||
DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
|
||||
.version(updateVersion).versionType(request.versionType())
|
||||
.waitForActiveShards(request.waitForActiveShards())
|
||||
.timeout(request.timeout())
|
||||
.setRefreshPolicy(request.getRefreshPolicy());
|
||||
return new Result(deleteRequest, DocWriteResponse.Result.DELETED, updatedSourceAsMap, updateSourceContentType);
|
||||
} else if ("none".equals(operation)) {
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;

@ -73,7 +74,7 @@ final class BootstrapChecks {
        final List<BootstrapCheck> combinedChecks = new ArrayList<>(builtInChecks);
        combinedChecks.addAll(additionalChecks);
        check(
            enforceLimits(boundTransportAddress),
            enforceLimits(boundTransportAddress, DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)),
            Collections.unmodifiableList(combinedChecks),
            Node.NODE_NAME_SETTING.get(settings));
    }

@ -164,13 +165,16 @@ final class BootstrapChecks {
     * Tests if the checks should be enforced.
     *
     * @param boundTransportAddress the node network bindings
     * @param discoveryType the discovery type
     * @return {@code true} if the checks should be enforced
     */
    static boolean enforceLimits(final BoundTransportAddress boundTransportAddress) {
        Predicate<TransportAddress> isLoopbackOrLinkLocalAddress =
    static boolean enforceLimits(final BoundTransportAddress boundTransportAddress, final String discoveryType) {
        final Predicate<TransportAddress> isLoopbackOrLinkLocalAddress =
            t -> t.address().getAddress().isLinkLocalAddress() || t.address().getAddress().isLoopbackAddress();
        return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
        final boolean bound =
            !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
                isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
        return bound && !"single-node".equals(discoveryType);
    }

    // the list of checks to execute
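enforceLimits now also takes the discovery type, so a node started with single-node discovery skips the hard bootstrap checks even when it binds to a routable address. A rough, package-local sketch of that rule; the TransportAddress construction details are illustrative and it assumes the org.elasticsearch.bootstrap package so the package-private method is visible:

    static void enforceLimitsSketch() {
        TransportAddress loopback = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
        BoundTransportAddress localOnly = new BoundTransportAddress(new TransportAddress[] { loopback }, loopback);
        assert BootstrapChecks.enforceLimits(localOnly, "zen") == false;         // loopback-only: never enforced
        assert BootstrapChecks.enforceLimits(localOnly, "single-node") == false; // single-node discovery: never enforced
        // only a node bound to a non-local address with a regular discovery type returns true here
    }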
@ -195,6 +199,7 @@ final class BootstrapChecks {
        checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings)));
        checks.add(new OnErrorCheck());
        checks.add(new OnOutOfMemoryErrorCheck());
        checks.add(new EarlyAccessCheck());
        checks.add(new G1GCCheck());
        return Collections.unmodifiableList(checks);
    }

@ -577,6 +582,34 @@ final class BootstrapChecks {

    }

    /**
     * Bootstrap check for early-access builds from OpenJDK.
     */
    static class EarlyAccessCheck implements BootstrapCheck {

        @Override
        public boolean check() {
            return "Oracle Corporation".equals(jvmVendor()) && javaVersion().endsWith("-ea");
        }

        String jvmVendor() {
            return Constants.JVM_VENDOR;
        }

        String javaVersion() {
            return Constants.JAVA_VERSION;
        }

        @Override
        public String errorMessage() {
            return String.format(
                Locale.ROOT,
                "Java version [%s] is an early-access build, only use release builds",
                javaVersion());
        }

    }

    /**
     * Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
     */
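The early-access check exposes jvmVendor() and javaVersion() as overridable hooks, so it can be exercised without actually running on an EA build. A small sketch along those lines (it assumes the same org.elasticsearch.bootstrap package, since the class and its hooks are package-private):

    BootstrapChecks.EarlyAccessCheck check = new BootstrapChecks.EarlyAccessCheck() {
        @Override
        String jvmVendor() {
            return "Oracle Corporation";
        }

        @Override
        String javaVersion() {
            return "9-ea"; // an early-access version string
        }
    };
    // check() is true for Oracle early-access builds, so bootstrap would fail with errorMessage():
    // Java version [9-ea] is an early-access build, only use release builds
    assert check.check();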
|
|
|
@ -19,9 +19,10 @@
|
|||
|
||||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
import org.elasticsearch.plugins.Platforms;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
@ -32,97 +33,89 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/**
|
||||
* Spawns native plugin controller processes if present. Will only work prior to a system call filter being installed.
|
||||
* Spawns native plugin controller processes if present. Will only work prior to a system call
|
||||
* filter being installed.
|
||||
*/
|
||||
final class Spawner implements Closeable {
|
||||
|
||||
private static final String PROGRAM_NAME = Constants.WINDOWS ? "controller.exe" : "controller";
|
||||
private static final String PLATFORM_NAME = makePlatformName(Constants.OS_NAME, Constants.OS_ARCH);
|
||||
private static final String TMP_ENVVAR = "TMPDIR";
|
||||
|
||||
/**
|
||||
/*
|
||||
* References to the processes that have been spawned, so that we can destroy them.
|
||||
*/
|
||||
private final List<Process> processes = new ArrayList<>();
|
||||
private AtomicBoolean spawned = new AtomicBoolean();
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
IOUtils.close(() -> processes.stream().map(s -> (Closeable)s::destroy).iterator());
|
||||
} finally {
|
||||
processes.clear();
|
||||
}
|
||||
IOUtils.close(() -> processes.stream().map(s -> (Closeable) s::destroy).iterator());
|
||||
}
|
||||
|
||||
/**
|
||||
* For each plugin, attempt to spawn the controller daemon. Silently ignore any plugins
|
||||
* that don't include a controller for the correct platform.
|
||||
* Spawns the native controllers for each plugin
|
||||
*
|
||||
* @param environment the node environment
|
||||
* @throws IOException if an I/O error occurs reading the plugins or spawning a native process
|
||||
*/
|
||||
void spawnNativePluginControllers(Environment environment) throws IOException {
|
||||
if (Files.exists(environment.pluginsFile())) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
|
||||
for (Path plugin : stream) {
|
||||
Path spawnPath = makeSpawnPath(plugin);
|
||||
if (Files.isRegularFile(spawnPath)) {
|
||||
spawnNativePluginController(spawnPath, environment.tmpFile());
|
||||
}
|
||||
void spawnNativePluginControllers(final Environment environment) throws IOException {
|
||||
if (!spawned.compareAndSet(false, true)) {
|
||||
throw new IllegalStateException("native controllers already spawned");
|
||||
}
|
||||
final Path pluginsFile = environment.pluginsFile();
|
||||
if (!Files.exists(pluginsFile)) {
|
||||
throw new IllegalStateException("plugins directory [" + pluginsFile + "] not found");
|
||||
}
|
||||
/*
|
||||
* For each plugin, attempt to spawn the controller daemon. Silently ignore any plugin that
|
||||
* don't include a controller for the correct platform.
|
||||
*/
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsFile)) {
|
||||
for (final Path plugin : stream) {
|
||||
final PluginInfo info = PluginInfo.readFromProperties(plugin);
|
||||
final Path spawnPath = Platforms.nativeControllerPath(plugin);
|
||||
if (!Files.isRegularFile(spawnPath)) {
|
||||
continue;
|
||||
}
|
||||
if (!info.hasNativeController()) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"plugin [%s] does not have permission to fork native controller",
|
||||
plugin.getFileName());
|
||||
throw new IllegalArgumentException(message);
|
||||
}
|
||||
final Process process =
|
||||
spawnNativePluginController(spawnPath, environment.tmpFile());
|
||||
processes.add(process);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to spawn the controller daemon for a given plugin. The spawned process
|
||||
* will remain connected to this JVM via its stdin, stdout and stderr, but the
|
||||
* references to these streams are not available to code outside this package.
|
||||
* Attempt to spawn the controller daemon for a given plugin. The spawned process will remain
|
||||
* connected to this JVM via its stdin, stdout, and stderr streams, but the references to these
|
||||
* streams are not available to code outside this package.
|
||||
*/
|
||||
private void spawnNativePluginController(Path spawnPath, Path tmpPath) throws IOException {
|
||||
ProcessBuilder pb = new ProcessBuilder(spawnPath.toString());
|
||||
private Process spawnNativePluginController(
|
||||
final Path spawnPath,
|
||||
final Path tmpPath) throws IOException {
|
||||
final ProcessBuilder pb = new ProcessBuilder(spawnPath.toString());
|
||||
|
||||
// The only environment variable passes on the path to the temporary directory
|
||||
// the only environment variable passes on the path to the temporary directory
|
||||
pb.environment().clear();
|
||||
pb.environment().put(TMP_ENVVAR, tmpPath.toString());
|
||||
pb.environment().put("TMPDIR", tmpPath.toString());
|
||||
|
||||
// The output stream of the Process object corresponds to the daemon's stdin
|
||||
processes.add(pb.start());
|
||||
// the output stream of the process object corresponds to the daemon's stdin
|
||||
return pb.start();
|
||||
}
|
||||
|
||||
/**
|
||||
* The collection of processes representing spawned native controllers.
|
||||
*
|
||||
* @return the processes
|
||||
*/
|
||||
List<Process> getProcesses() {
|
||||
return Collections.unmodifiableList(processes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Make the full path to the program to be spawned.
|
||||
*/
|
||||
static Path makeSpawnPath(Path plugin) {
|
||||
return plugin.resolve("platform").resolve(PLATFORM_NAME).resolve("bin").resolve(PROGRAM_NAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Make the platform name in the format used in Kibana downloads, for example:
|
||||
* - darwin-x86_64
|
||||
* - linux-x86-64
|
||||
* - windows-x86_64
|
||||
* For *nix platforms this is more-or-less `uname -s`-`uname -m` converted to lower case.
|
||||
* However, for consistency between different operating systems on the same architecture
|
||||
* "amd64" is replaced with "x86_64" and "i386" with "x86".
|
||||
* For Windows it's "windows-" followed by either "x86" or "x86_64".
|
||||
*/
|
||||
static String makePlatformName(String osName, String osArch) {
|
||||
String os = osName.toLowerCase(Locale.ROOT);
|
||||
if (os.startsWith("windows")) {
|
||||
os = "windows";
|
||||
} else if (os.equals("mac os x")) {
|
||||
os = "darwin";
|
||||
}
|
||||
String cpu = osArch.toLowerCase(Locale.ROOT);
|
||||
if (cpu.equals("amd64")) {
|
||||
cpu = "x86_64";
|
||||
} else if (cpu.equals("i386")) {
|
||||
cpu = "x86";
|
||||
}
|
||||
return os + "-" + cpu;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,6 +30,10 @@ import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.explain.ExplainRequest;
import org.elasticsearch.action.explain.ExplainRequestBuilder;
import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.fieldstats.FieldStatsRequest;
import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder;
import org.elasticsearch.action.fieldstats.FieldStatsResponse;

@ -458,6 +462,21 @@ public interface Client extends ElasticsearchClient, Releasable {

    void fieldStats(FieldStatsRequest request, ActionListener<FieldStatsResponse> listener);

    /**
     * Builder for the field capabilities request.
     */
    FieldCapabilitiesRequestBuilder prepareFieldCaps();

    /**
     * An action that returns the field capabilities from the provided request
     */
    ActionFuture<FieldCapabilitiesResponse> fieldCaps(FieldCapabilitiesRequest request);

    /**
     * An action that returns the field capabilities from the provided request
     */
    void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener);

    /**
     * Returns this clients settings
     */
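The new field-capabilities methods sit next to the existing fieldStats ones on the Client interface. A hedged usage sketch, assuming the FieldCapabilitiesRequest#fields(String...) setter and the response's per-field, per-type map accessor; client is any connected Client and the field names are made up:

    static void printFieldCaps(Client client) {
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().fields("title", "@timestamp");

        // Blocking variant through the returned ActionFuture.
        FieldCapabilitiesResponse response = client.fieldCaps(request).actionGet();
        response.get().forEach((field, capsPerType) ->
            capsPerType.forEach((type, caps) ->
                System.out.println(field + " as " + type
                        + ": searchable=" + caps.isSearchable()
                        + ", aggregatable=" + caps.isAggregatable())));

        // Non-blocking variant.
        client.fieldCaps(request, ActionListener.wrap(
            r -> System.out.println("capabilities for " + r.get().size() + " fields"),
            Throwable::printStackTrace));
    }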

@ -50,6 +50,9 @@ import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRespon
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest;
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;

@ -817,5 +820,4 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     * Swaps the index pointed to by an alias given all provided conditions are satisfied
     */
    void rolloverIndex(RolloverRequest request, ActionListener<RolloverResponse> listener);

}

@ -272,6 +272,10 @@ import org.elasticsearch.action.explain.ExplainAction;
import org.elasticsearch.action.explain.ExplainRequest;
import org.elasticsearch.action.explain.ExplainRequestBuilder;
import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
import org.elasticsearch.action.fieldstats.FieldStatsRequest;
import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder;

@ -667,6 +671,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new FieldStatsRequestBuilder(this, FieldStatsAction.INSTANCE);
    }

    @Override
    public void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener) {
        execute(FieldCapabilitiesAction.INSTANCE, request, listener);
    }

    @Override
    public ActionFuture<FieldCapabilitiesResponse> fieldCaps(FieldCapabilitiesRequest request) {
        return execute(FieldCapabilitiesAction.INSTANCE, request);
    }

    @Override
    public FieldCapabilitiesRequestBuilder prepareFieldCaps() {
        return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE);
    }

    static class Admin implements AdminClient {

        private final ClusterAdmin clusterAdmin;

@ -183,7 +183,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
            throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
        }
        return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards,
            Property.IndexScope);
            Property.IndexScope, Property.Final);
    }

    public static final String INDEX_SETTING_PREFIX = "index.";
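Marking the shard-count setting as Final lets the generic settings machinery reject updates, which is why the explicit special case in MetaDataUpdateSettingsService is removed further down. A hedged sketch of the request that now fails, with a made-up index name and a connected Client:

    static void shardCountIsImmutable(Client client) {
        try {
            client.admin().indices().prepareUpdateSettings("test")
                    .setSettings(Settings.builder().put("index.number_of_shards", 10))
                    .get();
        } catch (Exception e) {
            // expected, roughly: final index setting [index.number_of_shards], not updateable
            System.out.println(e.getMessage());
        }
    }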
@ -226,7 +226,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
|
||||
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
|
||||
public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string";
|
||||
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
|
||||
public static final String SETTING_CREATION_DATE = "index.creation_date";
|
||||
/**
|
||||
* The user provided name for an index. This is the plain string provided by the user when the index was created.
|
||||
|
@ -311,7 +310,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
|
||||
private final Version indexCreatedVersion;
|
||||
private final Version indexUpgradedVersion;
|
||||
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
|
||||
private final ActiveShardCount waitForActiveShards;
|
||||
|
||||
|
@ -319,7 +317,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
|
||||
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
|
||||
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
|
||||
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion,
|
||||
Version indexCreatedVersion, Version indexUpgradedVersion,
|
||||
int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards) {
|
||||
|
||||
this.index = index;
|
||||
|
@ -341,7 +339,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
this.initialRecoveryFilters = initialRecoveryFilters;
|
||||
this.indexCreatedVersion = indexCreatedVersion;
|
||||
this.indexUpgradedVersion = indexUpgradedVersion;
|
||||
this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
|
||||
this.routingNumShards = routingNumShards;
|
||||
this.routingFactor = routingNumShards / numberOfShards;
|
||||
this.routingPartitionSize = routingPartitionSize;
|
||||
|
@ -401,13 +398,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
return indexUpgradedVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@link org.apache.lucene.util.Version} of the oldest lucene segment in the index
|
||||
*/
|
||||
public org.apache.lucene.util.Version getMinimumCompatibleVersion() {
|
||||
return minimumCompatibleLuceneVersion;
|
||||
}
|
||||
|
||||
public long getCreationDate() {
|
||||
return settings.getAsLong(SETTING_CREATION_DATE, -1L);
|
||||
}
|
||||
|
@ -1052,17 +1042,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
}
|
||||
Version indexCreatedVersion = Version.indexCreated(settings);
|
||||
Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE + "] setting", ex);
|
||||
}
|
||||
} else {
|
||||
minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
|
||||
if (primaryTerms == null) {
|
||||
initializePrimaryTerms();
|
||||
|
@ -1081,7 +1060,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
|
||||
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
|
||||
tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
|
||||
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards);
|
||||
indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards);
|
||||
}
|
||||
|
||||
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
|
|
|
@ -603,9 +603,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
.put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize())
|
||||
.put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName())
|
||||
.put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID());
|
||||
if (sourceMetaData.getMinimumCompatibleVersion() != null) {
|
||||
indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -165,10 +165,6 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
indexScopedSettings.validate(normalizedSettings);
|
||||
// never allow to change the number of shards
|
||||
for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
|
||||
if (entry.getKey().equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
|
||||
listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index"));
|
||||
return;
|
||||
}
|
||||
Setting setting = indexScopedSettings.get(entry.getKey());
|
||||
assert setting != null; // we already validated the normalized settings
|
||||
settingsForClosedIndices.put(entry.getKey(), entry.getValue());
|
||||
|
@ -329,7 +325,6 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
// No reason to pollute the settings, we didn't really upgrade anything
|
||||
metaDataBuilder.put(IndexMetaData.builder(indexMetaData)
|
||||
.settings(Settings.builder().put(indexMetaData.getSettings())
|
||||
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue().v2())
|
||||
.put(IndexMetaData.SETTING_VERSION_UPGRADED, entry.getValue().v1())
|
||||
)
|
||||
);
|
||||
|
|
|
@ -333,9 +333,10 @@ public class Joda {
            boolean isPositive = text.startsWith("-") == false;
            boolean isTooLong = text.length() > estimateParsedLength();

            if ((isPositive && isTooLong) ||
                // timestamps have to have UTC timezone
                bucket.getZone() != DateTimeZone.UTC) {
            if (bucket.getZone() != DateTimeZone.UTC) {
                String format = hasMilliSecondPrecision ? "epoch_millis" : "epoch_second";
                throw new IllegalArgumentException("time_zone must be UTC for format [" + format + "]");
            } else if (isPositive && isTooLong) {
                return -1;
            }

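With this change the epoch parsers refuse non-UTC zones outright instead of falling through to the length check. A rough sketch of the new failure mode, assuming Joda.forPattern wires in the parser shown above and that shifting the formatter's zone is what puts a non-UTC zone on the parse bucket:

    import org.elasticsearch.common.joda.Joda;
    import org.joda.time.DateTimeZone;
    import org.joda.time.format.DateTimeFormatter;

    final class EpochZoneSketch {
        public static void main(String[] args) {
            DateTimeFormatter utc = Joda.forPattern("epoch_millis").parser();
            System.out.println(utc.parseMillis("1490000000000")); // fine: the default zone is UTC

            DateTimeFormatter berlin = utc.withZone(DateTimeZone.forID("Europe/Berlin"));
            try {
                berlin.parseMillis("1490000000000");
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // time_zone must be UTC for format [epoch_millis]
            }
        }
    }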
@ -57,6 +57,8 @@ import org.apache.lucene.search.TopFieldDocs;
|
|||
import org.apache.lucene.search.TwoPhaseIterator;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
|
||||
import org.apache.lucene.search.SortedNumericSortField;
|
||||
import org.apache.lucene.search.SortedSetSortField;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
|
@ -552,7 +554,22 @@ public class Lucene {
|
|||
SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE);
|
||||
newSortField.setMissingValue(sortField.getMissingValue());
|
||||
sortField = newSortField;
|
||||
} else if (sortField.getClass() == SortedSetSortField.class) {
|
||||
// for multi-valued sort field, we replace the SortedSetSortField with a simple SortField.
|
||||
// It works because the sort field is only used to merge results from different shards.
|
||||
SortField newSortField = new SortField(sortField.getField(), SortField.Type.STRING, sortField.getReverse());
|
||||
newSortField.setMissingValue(sortField.getMissingValue());
|
||||
sortField = newSortField;
|
||||
} else if (sortField.getClass() == SortedNumericSortField.class) {
|
||||
// for multi-valued sort field, we replace the SortedSetSortField with a simple SortField.
|
||||
// It works because the sort field is only used to merge results from different shards.
|
||||
SortField newSortField = new SortField(sortField.getField(),
|
||||
((SortedNumericSortField) sortField).getNumericType(),
|
||||
sortField.getReverse());
|
||||
newSortField.setMissingValue(sortField.getMissingValue());
|
||||
sortField = newSortField;
|
||||
}
|
||||
|
||||
if (sortField.getClass() != SortField.class) {
|
||||
throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]");
|
||||
}
|
||||
|
|
|
@ -35,8 +35,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

@ -382,11 +380,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
    /**
     * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
     */
    public boolean hasDynamicSetting(String key) {
    public boolean isDynamicSetting(String key) {
        final Setting<?> setting = get(key);
        return setting != null && setting.isDynamic();
    }

    /**
     * Returns <code>true</code> if the setting for the given key is final. Otherwise <code>false</code>.
     */
    public boolean isFinalSetting(String key) {
        final Setting<?> setting = get(key);
        return setting != null && setting.isFinal();
    }

    /**
     * Returns a settings object that contains all settings that are not
     * already set in the given source. The diff contains either the default value for each

@ -465,11 +471,14 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        boolean changed = false;
        final Set<String> toRemove = new HashSet<>();
        Settings.Builder settingsBuilder = Settings.builder();
        final Predicate<String> canUpdate = (key) -> (onlyDynamic == false && get(key) != null) || hasDynamicSetting(key);
        final Predicate<String> canRemove = (key) ->( // we can delete if
            onlyDynamic && hasDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
            || get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
            || (onlyDynamic == false && get(key) != null)); // if it's not dynamic AND we have a key
        final Predicate<String> canUpdate = (key) -> (
            isFinalSetting(key) == false && // it's not a final setting
            ((onlyDynamic == false && get(key) != null) || isDynamicSetting(key)));
        final Predicate<String> canRemove = (key) ->(// we can delete if
            isFinalSetting(key) == false && // it's not a final setting
            (onlyDynamic && isDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
                || get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
                || (onlyDynamic == false && get(key) != null))); // if it's not dynamic AND we have a key
        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
            if (entry.getValue() == null && (canRemove.test(entry.getKey()) || entry.getKey().endsWith("*"))) {
                // this either accepts null values that suffice the canUpdate test OR wildcard expressions (key ends with *)

@ -482,7 +491,11 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
                updates.put(entry.getKey(), entry.getValue());
                changed = true;
            } else {
                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
                if (isFinalSetting(entry.getKey())) {
                    throw new IllegalArgumentException("final " + type + " setting [" + entry.getKey() + "], not updateable");
                } else {
                    throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
                }
            }
        }
        changed |= applyDeletes(toRemove, target, canRemove);
|
|
|
@ -125,6 +125,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING,
        EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
        IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING,
        IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING,
        IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
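A minimal usage sketch (not part of this commit): once IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING is registered above, it can be supplied per index like any other index-scoped setting. The key name "index.translog.generation_threshold_size" and the "64mb" value below are assumptions for illustration only.

import org.elasticsearch.common.settings.Settings;

public class TranslogGenerationSettingExample {
    public static void main(String[] args) {
        // Build index settings that include the newly registered translog generation threshold.
        // Key and value are assumed for illustration; any size unit accepted by the setting would work.
        Settings indexSettings = Settings.builder()
                .put("index.number_of_shards", 1)
                .put("index.translog.generation_threshold_size", "64mb")
                .build();
        System.out.println(indexSettings.get("index.translog.generation_threshold_size")); // 64mb
    }
}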
@ -95,6 +95,12 @@ public class Setting<T> extends ToXContentToBytes {
         */
        Dynamic,

        /**
         * mark this setting as final, not updateable even when the context is not dynamic
         * ie. Setting this property on an index scoped setting will fail update when the index is closed
         */
        Final,

        /**
         * mark this setting as deprecated
         */
@ -135,6 +141,9 @@ public class Setting<T> extends ToXContentToBytes {
            this.properties = EMPTY_PROPERTIES;
        } else {
            this.properties = EnumSet.copyOf(Arrays.asList(properties));
            if (isDynamic() && isFinal()) {
                throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic");
            }
        }
    }
@ -218,6 +227,13 @@ public class Setting<T> extends ToXContentToBytes {
        return properties.contains(Property.Dynamic);
    }

    /**
     * Returns <code>true</code> if this setting is final, otherwise <code>false</code>
     */
    public final boolean isFinal() {
        return properties.contains(Property.Final);
    }

    /**
     * Returns the setting properties
     * @see Property
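A minimal sketch (not from this commit) of what the new Final property means in practice: a setting declared Final reports isFinal() as true, and combining Final with Dynamic is rejected by the constructor check added above. The setting keys below are hypothetical.

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public class FinalSettingExample {
    public static void main(String[] args) {
        // A node-scoped setting marked as Final (hypothetical key).
        Setting<Boolean> finalFlag =
                Setting.boolSetting("example.final_flag", false, Property.Final, Property.NodeScope);
        System.out.println(finalFlag.isFinal());   // true
        System.out.println(finalFlag.isDynamic()); // false

        try {
            // Final + Dynamic trips the validation added to the Setting constructor above.
            Setting.boolSetting("example.bad_flag", false, Property.Final, Property.Dynamic, Property.NodeScope);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // final setting [example.bad_flag] cannot be dynamic
        }
    }
}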
@ -40,7 +40,7 @@ public class AtomicArray<E> {
    }

    private final AtomicReferenceArray<E> array;
    private volatile List<Entry<E>> nonNullList;
    private volatile List<E> nonNullList;

    public AtomicArray(int size) {
        array = new AtomicReferenceArray<>(size);
@ -87,19 +87,18 @@ public class AtomicArray<E> {
    }

    /**
     * Returns the it as a non null list, with an Entry wrapping each value allowing to
     * retain its index.
     * Returns the it as a non null list.
     */
    public List<Entry<E>> asList() {
    public List<E> asList() {
        if (nonNullList == null) {
            if (array == null || array.length() == 0) {
                nonNullList = Collections.emptyList();
            } else {
                List<Entry<E>> list = new ArrayList<>(array.length());
                List<E> list = new ArrayList<>(array.length());
                for (int i = 0; i < array.length(); i++) {
                    E e = array.get(i);
                    if (e != null) {
                        list.add(new Entry<>(i, e));
                        list.add(e);
                    }
                }
                nonNullList = list;
@ -120,23 +119,4 @@ public class AtomicArray<E> {
        }
        return a;
    }

    /**
     * An entry within the array.
     */
    public static class Entry<E> {
        /**
         * The original index of the value within the array.
         */
        public final int index;
        /**
         * The value.
         */
        public final E value;

        public Entry(int index, E value) {
            this.index = index;
            this.value = value;
        }
    }
}
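A minimal sketch (hypothetical usage, not part of this commit) of the simplified AtomicArray#asList(): non-null slots now come back as plain elements instead of the removed Entry wrappers.

import java.util.List;
import org.elasticsearch.common.util.concurrent.AtomicArray;

public class AtomicArrayExample {
    public static void main(String[] args) {
        AtomicArray<String> results = new AtomicArray<>(3);
        results.set(0, "first");
        results.set(2, "third"); // slot 1 stays null and is skipped by asList()

        // Previously this returned List<AtomicArray.Entry<String>>; now it returns the values directly.
        List<String> nonNull = results.asList();
        System.out.println(nonNull); // [first, third]
    }
}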
@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.store.Store;

import java.io.Closeable;
import java.io.IOException;
@ -75,6 +74,7 @@ public final class ThreadContext implements Closeable, Writeable {
    private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct();
    private final Map<String, String> defaultHeader;
    private final ContextThreadLocal threadLocal;
    private boolean isSystemContext;

    /**
     * Creates a new ThreadContext instance
@ -317,6 +317,21 @@ public final class ThreadContext implements Closeable, Writeable {
        return threadLocal.get() == DEFAULT_CONTEXT;
    }

    /**
     * Marks this thread context as an internal system context. This signals that actions in this context are issued
     * by the system itself rather than by a user action.
     */
    public void markAsSystemContext() {
        threadLocal.set(threadLocal.get().setSystemContext());
    }

    /**
     * Returns <code>true</code> iff this context is a system context
     */
    public boolean isSystemContext() {
        return threadLocal.get().isSystemContext;
    }

    /**
     * Returns <code>true</code> if the context is closed, otherwise <code>true</code>
     */
@ -338,6 +353,7 @@ public final class ThreadContext implements Closeable, Writeable {
        private final Map<String, String> requestHeaders;
        private final Map<String, Object> transientHeaders;
        private final Map<String, List<String>> responseHeaders;
        private final boolean isSystemContext;

        private ThreadContextStruct(StreamInput in) throws IOException {
            final int numRequest = in.readVInt();
@ -349,27 +365,36 @@ public final class ThreadContext implements Closeable, Writeable {
            this.requestHeaders = requestHeaders;
            this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
            this.transientHeaders = Collections.emptyMap();
            isSystemContext = false; // we never serialize this it's a transient flag
        }

        private ThreadContextStruct setSystemContext() {
            if (isSystemContext) {
                return this;
            }
            return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, true);
        }

        private ThreadContextStruct(Map<String, String> requestHeaders,
                                    Map<String, List<String>> responseHeaders,
                                    Map<String, Object> transientHeaders) {
                                    Map<String, Object> transientHeaders, boolean isSystemContext) {
            this.requestHeaders = requestHeaders;
            this.responseHeaders = responseHeaders;
            this.transientHeaders = transientHeaders;
            this.isSystemContext = isSystemContext;
        }

        /**
         * This represents the default context and it should only ever be called by {@link #DEFAULT_CONTEXT}.
         */
        private ThreadContextStruct() {
            this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
            this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false);
        }

        private ThreadContextStruct putRequest(String key, String value) {
            Map<String, String> newRequestHeaders = new HashMap<>(this.requestHeaders);
            putSingleHeader(key, value, newRequestHeaders);
            return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders);
            return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders, isSystemContext);
        }

        private void putSingleHeader(String key, String value, Map<String, String> newHeaders) {
@ -387,7 +412,7 @@ public final class ThreadContext implements Closeable, Writeable {
                    putSingleHeader(entry.getKey(), entry.getValue(), newHeaders);
                }
                newHeaders.putAll(this.requestHeaders);
                return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders);
                return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, isSystemContext);
            }
        }
@ -408,7 +433,7 @@ public final class ThreadContext implements Closeable, Writeable {
                    newResponseHeaders.put(key, entry.getValue());
                }
            }
            return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders);
            return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext);
        }

        private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue) {
@ -432,7 +457,7 @@ public final class ThreadContext implements Closeable, Writeable {
                newResponseHeaders.put(key, Collections.singletonList(value));
            }

            return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders);
            return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext);
        }

        private ThreadContextStruct putTransient(String key, Object value) {
@ -440,7 +465,7 @@ public final class ThreadContext implements Closeable, Writeable {
            if (newTransient.putIfAbsent(key, value) != null) {
                throw new IllegalArgumentException("value for key [" + key + "] already present");
            }
            return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient);
            return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext);
        }

        boolean isEmpty() {
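A minimal sketch (not from this commit) of how the new system-context flag might be used: stash the current context, mark the stashed scope as a system context for internal actions, and let the original context be restored afterwards.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class SystemContextExample {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        System.out.println(threadContext.isSystemContext()); // false

        try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
            // Mark the fresh, stashed context as a system context before running internal actions.
            threadContext.markAsSystemContext();
            System.out.println(threadContext.isSystemContext()); // true within the stashed scope
        }

        System.out.println(threadContext.isSystemContext()); // false again once the original context is restored
    }
}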
@ -19,6 +19,20 @@

package org.elasticsearch.discovery;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.single.SingleNodeDiscovery;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@ -28,21 +42,6 @@ import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.ZenPing;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 * A module for loading classes for node discovery.
 */
@ -83,6 +82,7 @@ public class DiscoveryModule {
        discoveryTypes.put("zen",
            () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider));
        discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings()));
        discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, clusterService));
        for (DiscoveryPlugin plugin : plugins) {
            plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry,
                clusterService, hostsProvider).entrySet().forEach(entry -> {
@ -96,10 +96,12 @@ public class DiscoveryModule {
        if (discoverySupplier == null) {
            throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
        }
        Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType);
        discovery = Objects.requireNonNull(discoverySupplier.get());
    }

    public Discovery getDiscovery() {
        return discovery;
    }

}
@ -0,0 +1,144 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.single;

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.PendingClusterStatesQueue;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

/**
 * A discovery implementation where the only member of the cluster is the local node.
 */
public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery {

    private final ClusterService clusterService;
    private final DiscoverySettings discoverySettings;

    public SingleNodeDiscovery(final Settings settings, final ClusterService clusterService) {
        super(Objects.requireNonNull(settings));
        this.clusterService = Objects.requireNonNull(clusterService);
        final ClusterSettings clusterSettings =
                Objects.requireNonNull(clusterService.getClusterSettings());
        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
    }

    @Override
    public DiscoveryNode localNode() {
        return clusterService.localNode();
    }

    @Override
    public String nodeDescription() {
        return clusterService.getClusterName().value() + "/" + clusterService.localNode().getId();
    }

    @Override
    public void setAllocationService(final AllocationService allocationService) {

    }

    @Override
    public void publish(final ClusterChangedEvent event, final AckListener listener) {

    }

    @Override
    public DiscoveryStats stats() {
        return new DiscoveryStats((PendingClusterStateStats) null);
    }

    @Override
    public DiscoverySettings getDiscoverySettings() {
        return discoverySettings;
    }

    @Override
    public void startInitialJoin() {
        final ClusterStateTaskExecutor<DiscoveryNode> executor =
                new ClusterStateTaskExecutor<DiscoveryNode>() {

                    @Override
                    public ClusterTasksResult<DiscoveryNode> execute(
                            final ClusterState current,
                            final List<DiscoveryNode> tasks) throws Exception {
                        assert tasks.size() == 1;
                        final DiscoveryNodes.Builder nodes =
                                DiscoveryNodes.builder(current.nodes());
                        // always set the local node as master, there will not be other nodes
                        nodes.masterNodeId(localNode().getId());
                        final ClusterState next =
                                ClusterState.builder(current).nodes(nodes).build();
                        final ClusterTasksResult.Builder<DiscoveryNode> result =
                                ClusterTasksResult.builder();
                        return result.successes(tasks).build(next);
                    }

                    @Override
                    public boolean runOnlyOnMaster() {
                        return false;
                    }

                };
        final ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.URGENT);
        clusterService.submitStateUpdateTasks(
                "single-node-start-initial-join",
                Collections.singletonMap(localNode(), (s, e) -> {}), config, executor);
    }

    @Override
    public int getMinimumMasterNodes() {
        return 1;
    }

    @Override
    protected void doStart() {

    }

    @Override
    protected void doStop() {

    }

    @Override
    protected void doClose() throws IOException {

    }

}
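A minimal sketch (assumption, not from this commit): the DiscoveryModule changes above suggest the new implementation is selected by name, presumably through the "discovery.type" node setting, in which case a single-node cluster would be configured roughly like this.

import org.elasticsearch.common.settings.Settings;

public class SingleNodeDiscoveryExample {
    public static void main(String[] args) {
        // With this node setting, DiscoveryModule would look up the "single-node" supplier
        // registered above and instantiate SingleNodeDiscovery instead of ZenDiscovery.
        // The setting key "discovery.type" is assumed here for illustration.
        Settings nodeSettings = Settings.builder()
                .put("discovery.type", "single-node")
                .build();
        System.out.println(nodeSettings.get("discovery.type")); // single-node
    }
}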
@ -178,20 +178,28 @@ public class MembershipAction extends AbstractComponent {

        @Override
        public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception {
            ensureIndexCompatibility(Version.CURRENT.minimumIndexCompatibilityVersion(), request.state.getMetaData());
            ensureIndexCompatibility(Version.CURRENT, request.state.getMetaData());
            // for now, the mere fact that we can serialize the cluster state acts as validation....
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }
    }

    /**
     * Ensures that all indices are compatible with the supported index version.
     * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata
     * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index
     * compatibility version.
     * @see Version#minimumIndexCompatibilityVersion()
     * @throws IllegalStateException if any index is incompatible with the given version
     */
    static void ensureIndexCompatibility(final Version supportedIndexVersion, MetaData metaData) {
    static void ensureIndexCompatibility(final Version nodeVersion, MetaData metaData) {
        Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion();
        // we ensure that all indices in the cluster we join are compatible with us no matter if they are
        // closed or not we can't read mappings of these indices so we need to reject the join...
        for (IndexMetaData idxMetaData : metaData) {
            if (idxMetaData.getCreationVersion().after(nodeVersion)) {
                throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: "
                    + idxMetaData.getCreationVersion() + " the node version is: " + nodeVersion);
            }
            if (idxMetaData.getCreationVersion().before(supportedIndexVersion)) {
                throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: "
                    + idxMetaData.getCreationVersion() + " minimum compatible index version is: " + supportedIndexVersion);
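A minimal sketch (not from this commit) of the two checks the reworked ensureIndexCompatibility performs: an index may not have been created by a version newer than the joining node, and not by a version older than that node's minimum index-compatibility version.

import org.elasticsearch.Version;

public class IndexCompatibilityExample {
    public static void main(String[] args) {
        Version nodeVersion = Version.CURRENT;
        Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion();

        // An index created by the current version trips neither check.
        Version indexCreated = Version.CURRENT;
        boolean tooNew = indexCreated.after(nodeVersion);            // would trigger the first IllegalStateException
        boolean tooOld = indexCreated.before(supportedIndexVersion); // would trigger the second IllegalStateException
        System.out.println("too new: " + tooNew + ", too old: " + tooOld); // too new: false, too old: false
    }
}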
Some files were not shown because too many files have changed in this diff.