Merge branch 'master' into feature/rank-eval
commit ac3db8c30f
@@ -354,7 +354,7 @@ These are the linux flavors the Vagrantfile currently supports:
* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
* opensuse-13
* opensuse-42 aka Leap

We're missing the following from the support matrix because there aren't high
quality boxes available in vagrant atlas:
@@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/fedora-25-x86_64"
dnf_common config
end
config.vm.define "opensuse-13" do |config|
config.vm.box = "elastic/opensuse-13-x86_64"
config.vm.define "opensuse-42" do |config|
config.vm.box = "elastic/opensuse-42-x86_64"
opensuse_common config
end
config.vm.define "sles-12" do |config|
build.gradle
@@ -123,42 +123,39 @@ allprojects {
}
}

task('verifyVersions') {
description 'Verifies that all released versions that are index compatible are listed in Version.java.'
group 'Verification'
enabled = false == gradle.startParameter.isOffline()
task verifyVersions {
doLast {
if (gradle.startParameter.isOffline()) {
throw new GradleException("Must run in online mode to verify versions")
}
// Read the list from maven central
Node xml
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
xml = new XmlParser().parse(s)
}
Set<String> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ })
Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })

// Limit the known versions to those that should be index compatible
knownVersions = knownVersions.findAll { Integer.parseInt(it.split('\\.')[0]) >= prevMajor }
// Limit the known versions to those that should be index compatible, and are not future versions
knownVersions = knownVersions.findAll { it.major >= prevMajor && it.before(VersionProperties.elasticsearch) }

/* Limit the listed versions to those that have been marked as released.
* Versions not marked as released don't get the same testing and we want
* to make sure that we flip all unreleased versions to released as soon
* as possible after release. */
Set<String> actualVersions = new TreeSet<>(
indexCompatVersions
.findAll { false == it.snapshot }
.collect { it.toString() })

// TODO this is almost certainly going to fail on 5.4 when we release 5.5.0
Set<Version> actualVersions = new TreeSet<>(indexCompatVersions.findAll { false == it.snapshot })

// Finally, compare!
if (!knownVersions.equals(actualVersions)) {
throw new GradleException("out-of-date versions\nActual :" +
actualVersions + "\nExpected:" + knownVersions +
"; update Version.java")
if (knownVersions.equals(actualVersions) == false) {
throw new GradleException("out-of-date released versions\nActual :" + actualVersions + "\nExpected:" + knownVersions +
"\nUpdate Version.java. Note that Version.CURRENT doesn't count because it is not released.")
}
}
}
task('precommit') {
dependsOn(verifyVersions)

task branchConsistency {
description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.'
group 'Verification'
dependsOn verifyVersions
}

subprojects {
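To illustrate what the reworked verifyVersions task checks, here is a minimal standalone Java sketch of the same idea: pull the released versions from maven-metadata.xml and compare them against a locally declared set. The class name and the placeholder version constants are illustrative, not part of this commit.

import java.io.InputStream;
import java.net.URL;
import java.util.Set;
import java.util.TreeSet;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;

public class VerifyVersionsSketch {
    public static void main(String[] args) throws Exception {
        // Read the <version> entries published on maven central, keeping only x.y.z releases.
        Set<String> known = new TreeSet<>();
        try (InputStream in = new URL(
                "https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml").openStream()) {
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(in);
            NodeList versions = doc.getElementsByTagName("version");
            for (int i = 0; i < versions.getLength(); i++) {
                String v = versions.item(i).getTextContent();
                if (v.matches("\\d+\\.\\d+\\.\\d+")) {
                    known.add(v);
                }
            }
        }
        // Placeholder for the constants that Version.java would declare.
        Set<String> declared = new TreeSet<>(Set.of("5.4.0", "5.4.1"));
        if (!known.equals(declared)) {
            throw new IllegalStateException(
                "out-of-date released versions\nActual :" + declared + "\nExpected:" + known);
        }
    }
}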
@@ -12,10 +12,38 @@ import org.gradle.api.tasks.testing.Test
class RandomizedTestingPlugin implements Plugin<Project> {

void apply(Project project) {
setupSeed(project)
replaceTestTask(project.tasks)
configureAnt(project.ant)
}

/**
* Pins the test seed at configuration time so it isn't different on every
* {@link RandomizedTestingTask} execution. This is useful if random
* decisions in one run of {@linkplain RandomizedTestingTask} influence the
* outcome of subsequent runs. Pinning the seed up front like this makes
* the reproduction line from one run be useful on another run.
*/
static void setupSeed(Project project) {
if (project.rootProject.ext.has('testSeed')) {
/* Skip this if we've already pinned the testSeed. It is important
* that this checks the rootProject so that we know we've only ever
* initialized one time. */
return
}
String testSeed = System.getProperty('tests.seed')
if (testSeed == null) {
long seed = new Random(System.currentTimeMillis()).nextLong()
testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)
}
/* Set the testSeed on the root project first so other projects can use
* it during initialization. */
project.rootProject.ext.testSeed = testSeed
project.rootProject.subprojects {
project.ext.testSeed = testSeed
}
}

static void replaceTestTask(TaskContainer tasks) {
Test oldTestTask = tasks.findByPath('test')
if (oldTestTask == null) {
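A rough Java sketch of the seed-pinning idea introduced here, outside of Gradle (the class and method names are made up for illustration): honour an explicit -Dtests.seed if present, otherwise roll one seed once and hand the same formatted value to every consumer.

import java.util.Locale;
import java.util.Random;

public class SeedPinningSketch {

    /** Returns the seed to use for the whole build, computing it at most once. */
    static String pinSeed() {
        String testSeed = System.getProperty("tests.seed");
        if (testSeed == null) {
            long seed = new Random(System.currentTimeMillis()).nextLong();
            // Same formatting as the plugin: unsigned hex, upper case.
            testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT);
        }
        return testSeed;
    }

    public static void main(String[] args) {
        String seed = pinSeed();
        // Every later run reuses the pinned value, so the reproduction line stays valid.
        System.out.println("REPRODUCE WITH: gradle test -Dtests.seed=" + seed);
    }
}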
@@ -9,6 +9,7 @@ import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.RuntimeConfigurable
import org.apache.tools.ant.UnknownElement
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.FileCollection
import org.gradle.api.file.FileTreeElement
import org.gradle.api.internal.tasks.options.Option
@@ -259,8 +260,13 @@ class RandomizedTestingTask extends DefaultTask {
}
}
for (Map.Entry<String, Object> prop : systemProperties) {
if (prop.getKey().equals('tests.seed')) {
throw new InvalidUserDataException('Seed should be ' +
'set on the project instead of a system property')
}
sysproperty key: prop.getKey(), value: prop.getValue().toString()
}
systemProperty 'tests.seed', project.testSeed
for (Map.Entry<String, Object> envvar : environmentVariables) {
env key: envvar.getKey(), value: envvar.getValue().toString()
}
@@ -120,6 +120,7 @@ class BuildPlugin implements Plugin<Project> {
println "  JDK Version            : ${gradleJavaVersionDetails}"
println "  JAVA_HOME              : ${gradleJavaHome}"
}
println "  Random Testing Seed    : ${project.testSeed}"

// enforce gradle version
GradleVersion minGradle = GradleVersion.version('3.3')
@@ -525,7 +526,12 @@ class BuildPlugin implements Plugin<Project> {
systemProperty 'tests.logger.level', 'WARN'
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('tests.') ||
property.getKey().startsWith('es.')) {
property.getKey().startsWith('es.')) {
if (property.getKey().equals('tests.seed')) {
/* The seed is already set on the project so we
* shouldn't attempt to override it. */
continue;
}
systemProperty property.getKey(), property.getValue()
}
}
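The hunk above forwards tests.* and es.* system properties to the test JVM but now skips tests.seed, since the seed already comes from the project. A compact Java sketch of that filter (the method and map names are illustrative):

import java.util.HashMap;
import java.util.Map;

public class TestPropertyForwardingSketch {
    static Map<String, String> propertiesToForward() {
        Map<String, String> forwarded = new HashMap<>();
        for (Map.Entry<Object, Object> property : System.getProperties().entrySet()) {
            String key = property.getKey().toString();
            if (key.startsWith("tests.") || key.startsWith("es.")) {
                if (key.equals("tests.seed")) {
                    // The seed is already pinned on the project, so never override it here.
                    continue;
                }
                forwarded.put(key, property.getValue().toString());
            }
        }
        return forwarded;
    }
}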
@@ -19,9 +19,12 @@

package org.elasticsearch.gradle

import groovy.transform.Sortable

/**
* Encapsulates comparison and printing logic for an x.y.z version.
*/
@Sortable(includes=['id'])
public class Version {

final int major
@@ -57,10 +60,6 @@ public class Version {
return "${major}.${minor}.${bugfix}${snapshotStr}"
}

public boolean equals(Version compareTo) {
return id == compareTo.id
}

public boolean before(String compareTo) {
return id < fromString(compareTo).id
}
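The @Sortable(includes=['id']) annotation and the before() check both lean on a single numeric id per version. A hedged Java sketch of that kind of encoding (the exact multipliers are an assumption for illustration, not copied from the commit):

public class VersionIdSketch {
    // Pack major/minor/bugfix into one comparable integer, e.g. 5.4.1 -> 5040100 (assumed layout).
    static int id(int major, int minor, int bugfix) {
        return major * 1_000_000 + minor * 10_000 + bugfix * 100;
    }

    static boolean before(int[] left, int[] right) {
        return id(left[0], left[1], left[2]) < id(right[0], right[1], right[2]);
    }

    public static void main(String[] args) {
        System.out.println(before(new int[] {5, 4, 1}, new int[] {6, 0, 0})); // true
    }
}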
@@ -76,6 +76,14 @@ class ClusterConfiguration {
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
" " + System.getProperty('tests.jvm.argline', '')

/**
* Should the shared environment be cleaned on cluster startup? Defaults
* to {@code true} so we run with a clean cluster but some tests wish to
* preserve snapshots between clusters so they set this to false.
*/
@Input
boolean cleanShared = true

/**
* A closure to call which returns the unicast host to connect to for cluster formation.
*
@@ -54,14 +54,24 @@ class ClusterFormationTasks {
*/
static List<NodeInfo> setup(Project project, String prefix, Task runner, ClusterConfiguration config) {
File sharedDir = new File(project.buildDir, "cluster/shared")
// first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
// in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
// such that snapshots survive failures / test runs and there is no simple way today to fix that.
Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: config.dependencies) {
delete sharedDir
doLast {
sharedDir.mkdirs()
}
Object startDependencies = config.dependencies
/* First, if we want a clean environment, we remove everything in the
* shared cluster directory to ensure there are no leftovers in repos
* or anything. In theory this should not be necessary, but repositories
* are only deleted in the cluster-state and not on-disk such that
* snapshots survive failures / test runs and there is no simple way
* today to fix that. */
if (config.cleanShared) {
Task cleanup = project.tasks.create(
name: "${prefix}#prepareCluster.cleanShared",
type: Delete,
dependsOn: startDependencies) {
delete sharedDir
doLast {
sharedDir.mkdirs()
}
}
startDependencies = cleanup
}
List<Task> startTasks = []
List<NodeInfo> nodes = []
@@ -103,7 +113,7 @@ class ClusterFormationTasks {
}
NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
nodes.add(node)
Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0)
Object dependsOn = startTasks.empty ? startDependencies : startTasks.get(0)
startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, nodes.get(0)))
}
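A small Java sketch of the behaviour the new cleanShared flag guards (paths and names are illustrative): when cleaning is enabled, the shared directory is wiped and recreated before any node starts; otherwise it is left alone so snapshots from a previous cluster survive.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class SharedDirCleanupSketch {
    static void prepareSharedDir(Path sharedDir, boolean cleanShared) throws IOException {
        if (cleanShared && Files.exists(sharedDir)) {
            // Delete everything under the shared dir (children before parents).
            try (Stream<Path> walk = Files.walk(sharedDir)) {
                walk.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
            }
        }
        // Always make sure the directory exists before the cluster starts.
        Files.createDirectories(sharedDir);
    }
}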
@@ -25,12 +25,6 @@ class VagrantPropertiesExtension {
@Input
List<String> boxes

@Input
Long testSeed

@Input
String formattedTestSeed

@Input
String upgradeFromVersion
@@ -0,0 +1,127 @@
package org.elasticsearch.gradle.vagrant

import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.process.ExecResult
import org.gradle.process.internal.ExecException

/**
* Global configuration for whether Vagrant tasks are supported in this
* build environment.
*/
class VagrantSupportPlugin implements Plugin<Project> {

@Override
void apply(Project project) {
if (project.rootProject.ext.has('vagrantEnvChecksDone') == false) {
Map vagrantInstallation = getVagrantInstallation(project)
Map virtualBoxInstallation = getVirtualBoxInstallation(project)

project.rootProject.ext.vagrantInstallation = vagrantInstallation
project.rootProject.ext.virtualBoxInstallation = virtualBoxInstallation
project.rootProject.ext.vagrantSupported = vagrantInstallation.supported && virtualBoxInstallation.supported
project.rootProject.ext.vagrantEnvChecksDone = true

// Finding that HOME needs to be set when performing vagrant updates
String homeLocation = System.getenv("HOME")
if (project.rootProject.ext.vagrantSupported && homeLocation == null) {
throw new GradleException("Could not locate \$HOME environment variable. Vagrant is enabled " +
"and requires \$HOME to be set to function properly.")
}
}

addVerifyInstallationTasks(project)
}

private Map getVagrantInstallation(Project project) {
try {
ByteArrayOutputStream pipe = new ByteArrayOutputStream()
ExecResult runResult = project.exec {
commandLine 'vagrant', '--version'
standardOutput pipe
ignoreExitValue true
}
String version = pipe.toString().trim()
if (runResult.exitValue == 0) {
if (version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) {
return [ 'supported' : true ]
} else {
return [ 'supported' : false,
'info' : "Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]" ]
}
} else {
return [ 'supported' : false,
'info' : "Could not read installed vagrant version:\n" + version ]
}
} catch (ExecException e) {
// Exec still throws this if it cannot find the command, regardless if ignoreExitValue is set.
// Swallow error. Vagrant isn't installed. Don't halt the build here.
return [ 'supported' : false, 'info' : "Could not find vagrant: " + e.message ]
}
}

private Map getVirtualBoxInstallation(Project project) {
try {
ByteArrayOutputStream pipe = new ByteArrayOutputStream()
ExecResult runResult = project.exec {
commandLine 'vboxmanage', '--version'
standardOutput = pipe
ignoreExitValue true
}
String version = pipe.toString().trim()
if (runResult.exitValue == 0) {
try {
String[] versions = version.split('\\.')
int major = Integer.parseInt(versions[0])
int minor = Integer.parseInt(versions[1])
if ((major < 5) || (major == 5 && minor < 1)) {
return [ 'supported' : false,
'info' : "Illegal version of virtualbox [${version}]. Need [5.1+]" ]
} else {
return [ 'supported' : true ]
}
} catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
return [ 'supported' : false,
'info' : "Unable to parse version of virtualbox [${version}]. Required [5.1+]" ]
}
} else {
return [ 'supported': false, 'info': "Could not read installed virtualbox version:\n" + version ]
}
} catch (ExecException e) {
// Exec still throws this if it cannot find the command, regardless if ignoreExitValue is set.
// Swallow error. VirtualBox isn't installed. Don't halt the build here.
return [ 'supported' : false, 'info' : "Could not find virtualbox: " + e.message ]
}
}

private void addVerifyInstallationTasks(Project project) {
createCheckVagrantVersionTask(project)
createCheckVirtualBoxVersionTask(project)
}

private void createCheckVagrantVersionTask(Project project) {
project.tasks.create('vagrantCheckVersion') {
description 'Check the Vagrant version'
group 'Verification'
doLast {
if (project.rootProject.vagrantInstallation.supported == false) {
throw new InvalidUserDataException(project.rootProject.vagrantInstallation.info)
}
}
}
}

private void createCheckVirtualBoxVersionTask(Project project) {
project.tasks.create('virtualboxCheckVersion') {
description 'Check the Virtualbox version'
group 'Verification'
doLast {
if (project.rootProject.virtualBoxInstallation.supported == false) {
throw new InvalidUserDataException(project.rootProject.virtualBoxInstallation.info)
}
}
}
}
}
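The plugin's two probes boil down to version checks on command output. A minimal Java sketch of both checks, using the same thresholds and regex as the Groovy code above (the class and method names are invented for illustration):

public class VagrantVersionCheckSketch {
    // Mirrors the regex used for `vagrant --version`: 1.8.6+ or any 1.9.x.
    static boolean vagrantSupported(String versionLine) {
        return versionLine.matches("Vagrant 1\\.(8\\.[6-9]|9\\.[0-9])+");
    }

    // Mirrors the numeric check used for `vboxmanage --version`: 5.1 or newer.
    static boolean virtualBoxSupported(String version) {
        try {
            String[] parts = version.split("\\.");
            int major = Integer.parseInt(parts[0]);
            int minor = Integer.parseInt(parts[1]);
            return major > 5 || (major == 5 && minor >= 1);
        } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(vagrantSupported("Vagrant 1.8.7"));   // true
        System.out.println(virtualBoxSupported("5.0.40"));       // false
    }
}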
@@ -1,5 +1,6 @@
package org.elasticsearch.gradle.vagrant

import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin
import org.elasticsearch.gradle.FileContentsTask
import org.gradle.api.*
import org.gradle.api.artifacts.dsl.RepositoryHandler
@@ -20,7 +21,7 @@ class VagrantTestPlugin implements Plugin<Project> {
'fedora-25',
'oel-6',
'oel-7',
'opensuse-13',
'opensuse-42',
'sles-12',
'ubuntu-1404',
'ubuntu-1604'
@@ -100,23 +101,10 @@ class VagrantTestPlugin implements Plugin<Project> {
private static void createBatsConfiguration(Project project) {
project.configurations.create(BATS)

final long seed
final String formattedSeed
String maybeTestsSeed = System.getProperty("tests.seed")
if (maybeTestsSeed != null) {
if (maybeTestsSeed.trim().isEmpty()) {
throw new GradleException("explicit tests.seed cannot be empty")
}
String masterSeed = maybeTestsSeed.tokenize(':').get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
} else {
seed = new Random().nextLong()
formattedSeed = String.format("%016X", seed)
}

String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion");
if (upgradeFromVersion == null) {
String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0)
final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16)
upgradeFromVersion = project.indexCompatVersions[new Random(seed).nextInt(project.indexCompatVersions.size())]
}

@@ -130,8 +118,6 @@ class VagrantTestPlugin implements Plugin<Project> {
project.dependencies.add(BATS, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}")
}

project.extensions.esvagrant.testSeed = seed
project.extensions.esvagrant.formattedTestSeed = formattedSeed
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
}

@@ -227,43 +213,6 @@ class VagrantTestPlugin implements Plugin<Project> {
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
}

private static void createCheckVagrantVersionTask(Project project) {
project.tasks.create('vagrantCheckVersion', Exec) {
description 'Check the Vagrant version'
group 'Verification'
commandLine 'vagrant', '--version'
standardOutput = new ByteArrayOutputStream()
doLast {
String version = standardOutput.toString().trim()
if ((version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) == false) {
throw new InvalidUserDataException("Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]")
}
}
}
}

private static void createCheckVirtualBoxVersionTask(Project project) {
project.tasks.create('virtualboxCheckVersion', Exec) {
description 'Check the Virtualbox version'
group 'Verification'
commandLine 'vboxmanage', '--version'
standardOutput = new ByteArrayOutputStream()
doLast {
String version = standardOutput.toString().trim()
try {
String[] versions = version.split('\\.')
int major = Integer.parseInt(versions[0])
int minor = Integer.parseInt(versions[1])
if ((major < 5) || (major == 5 && minor < 1)) {
throw new InvalidUserDataException("Illegal version of virtualbox [${version}]. Need [5.1+]")
}
} catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
throw new InvalidUserDataException("Unable to parse version of virtualbox [${version}]. Required [5.1+]", e)
}
}
}
}

private static void createPackagingTestTask(Project project) {
project.tasks.create('packagingTest') {
group 'Verification'
@@ -291,8 +240,6 @@ class VagrantTestPlugin implements Plugin<Project> {
createCleanTask(project)
createStopTask(project)
createSmokeTestTask(project)
createCheckVagrantVersionTask(project)
createCheckVirtualBoxVersionTask(project)
createPrepareVagrantTestEnvTask(project)
createPackagingTestTask(project)
createPlatformTestTask(project)
@@ -395,7 +342,7 @@ class VagrantTestPlugin implements Plugin<Project> {
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${packaging.path} " +
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
"-Dtests.seed=${project.testSeed} "
}
}
}
@@ -415,14 +362,14 @@ class VagrantTestPlugin implements Plugin<Project> {
environmentVars vagrantEnvVars
dependsOn up
finalizedBy halt
args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}"
args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}"
}
TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${platform.path} " +
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
"-Dtests.seed=${project.testSeed} "
}
}
}
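For the packaging tests the upgrade-from version is now derived from the shared test seed instead of a second, independent seed. A hedged Java sketch of that derivation (the version list is a placeholder, not taken from the commit):

import java.util.List;
import java.util.Random;

public class UpgradeFromVersionSketch {
    static String pickUpgradeFromVersion(String testSeed, List<String> indexCompatVersions) {
        // Only the first token of a "seed:subseed" style value is used, parsed as unsigned hex.
        String firstPartOfSeed = testSeed.split(":")[0];
        long seed = Long.parseUnsignedLong(firstPartOfSeed, 16);
        // Seeding Random with the build seed makes the choice reproducible across runs.
        return indexCompatVersions.get(new Random(seed).nextInt(indexCompatVersions.size()));
    }

    public static void main(String[] args) {
        List<String> versions = List.of("5.3.0", "5.3.1", "5.4.0"); // placeholder list
        System.out.println(pickUpgradeFromVersion("DEADBEEF", versions));
    }
}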
@@ -0,0 +1 @@
implementation-class=org.elasticsearch.gradle.vagrant.VagrantSupportPlugin
@@ -16,7 +16,6 @@
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when the
files start to pass. -->
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]HeapBufferedAsyncResponseConsumerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]postingshighlight[/\\]CustomPostingsHighlighter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]vectorhighlight[/\\]CustomFieldQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]Action.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionRequestBuilder.java" checks="LineLength" />
@@ -428,7 +427,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPool.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQueryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]postingshighlight[/\\]CustomPostingsHighlighterTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]VersionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]RejectionActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]HotThreadsIT.java" checks="LineLength" />
@@ -1,5 +1,5 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha2
elasticsearch = 6.0.0-alpha3
lucene = 7.0.0-snapshot-a0aef2f

# optional dependencies
@@ -57,6 +57,11 @@ public class RestNoopBulkAction extends BaseRestHandler {
controller.registerHandler(PUT, "/{index}/{type}/_noop_bulk", this);
}

@Override
public String getName() {
return "noop_bulk_action";
}

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
BulkRequest bulkRequest = Requests.bulkRequest();
@@ -73,8 +78,8 @@ public class RestNoopBulkAction extends BaseRestHandler {
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true,
request.getXContentType());
bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, defaultFields,
null, defaultPipeline, null, true, request.getXContentType());

// short circuit the call to the transport layer
return channel -> {
@@ -42,6 +42,11 @@ public class RestNoopSearchAction extends BaseRestHandler {
controller.registerHandler(POST, "/{index}/{type}/_noop_search", this);
}

@Override
public String getName() {
return "noop_search_action";
}

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
SearchRequest searchRequest = new SearchRequest();
@@ -33,7 +33,9 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
@@ -63,7 +65,7 @@ import java.util.StringJoiner;

final class Request {

private static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;
static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;

final String method;
final String endpoint;
@@ -338,6 +340,16 @@ final class Request {
return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
}

static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException {
HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE);
return new Request("GET", "/_search/scroll", Collections.emptyMap(), entity);
}

static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException {
HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE);
return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity);
}

private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
return new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType()));
@@ -483,7 +495,7 @@ final class Request {
return this;
}

Params withIndicesOptions (IndicesOptions indicesOptions) {
Params withIndicesOptions(IndicesOptions indicesOptions) {
putParam("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable()));
putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices()));
String expandWildcards;
@@ -36,8 +36,11 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.CheckedFunction;
@@ -325,14 +328,57 @@ public class RestHighLevelClient {
performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
}

private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
/**
* Executes a search using the Search Scroll api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
* API on elastic.co</a>
*/
public SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, emptySet(), headers);
}

/**
* Asynchronously executes a search using the Search Scroll api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
* API on elastic.co</a>
*/
public void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionListener<SearchResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent,
listener, emptySet(), headers);
}

/**
* Clears one or more scroll ids using the Clear Scroll api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
* Clear Scroll API on elastic.co</a>
*/
public ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
emptySet(), headers);
}

/**
* Asynchronously clears one or more scroll ids using the Clear Scroll api
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
* Clear Scroll API on elastic.co</a>
*/
public void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListener<ClearScrollResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
listener, emptySet(), headers);
}

protected <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
Set<Integer> ignores, Header... headers) throws IOException {
return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
}

<Req extends ActionRequest, Resp> Resp performRequest(Req request,
protected <Req extends ActionRequest, Resp> Resp performRequest(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter,
Set<Integer> ignores, Header... headers) throws IOException {
@@ -354,6 +400,7 @@ public class RestHighLevelClient {
}
throw parseResponseException(e);
}

try {
return responseConverter.apply(response);
} catch(Exception e) {
@@ -361,7 +408,7 @@ public class RestHighLevelClient {
}
}

private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
protected <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
@@ -369,7 +416,7 @@ public class RestHighLevelClient {
listener, ignores, headers);
}

<Req extends ActionRequest, Resp> void performRequestAsync(Req request,
protected <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
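A short, hedged usage sketch of the new scroll methods from a caller's point of view (the index name, page size and keep-alive are made up; error handling is omitted):

import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class ScrollUsageSketch {
    static void scrollOverIndex(RestHighLevelClient client) throws java.io.IOException {
        // Start a scrolled search; the response carries a scroll id to page with.
        SearchRequest searchRequest = new SearchRequest("my-index");
        searchRequest.source(new SearchSourceBuilder().size(100));
        searchRequest.scroll(TimeValue.timeValueMinutes(1));
        SearchResponse response = client.search(searchRequest);

        // Fetch the next page with the Search Scroll API.
        SearchScrollRequest scrollRequest = new SearchScrollRequest(response.getScrollId());
        scrollRequest.scroll(TimeValue.timeValueMinutes(1));
        SearchResponse nextPage = client.searchScroll(scrollRequest);

        // Release server-side resources once done.
        ClearScrollRequest clearRequest = new ClearScrollRequest();
        clearRequest.addScrollId(nextPage.getScrollId());
        ClearScrollResponse cleared = client.clearScroll(clearRequest);
        System.out.println("scroll cleared: " + cleared.isSucceeded());
    }
}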
@@ -580,7 +580,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {

BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync);
assertEquals(RestStatus.OK, bulkResponse.status());
assertTrue(bulkResponse.getTookInMillis() > 0);
assertTrue(bulkResponse.getTook().getMillis() > 0);
assertEquals(nbItems, bulkResponse.getItems().length);

validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest);
@@ -671,7 +671,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
BulkRequest bulkRequest = requestRef.get();

assertEquals(RestStatus.OK, bulkResponse.status());
assertTrue(bulkResponse.getTookInMillis() > 0);
assertTrue(bulkResponse.getTook().getMillis() > 0);
assertEquals(nbItems, bulkResponse.getItems().length);
assertNull(error.get());
@@ -0,0 +1,181 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.client.ESRestHighLevelClientTestCase.execute;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

/**
 * Tests and demonstrates how {@link RestHighLevelClient} can be extended to support custom endpoints.
 */
public class CustomRestHighLevelClientTests extends ESTestCase {

private static final String ENDPOINT = "/_custom";

private CustomRestClient restHighLevelClient;

@Before
@SuppressWarnings("unchecked")
public void initClients() throws IOException {
if (restHighLevelClient == null) {
final RestClient restClient = mock(RestClient.class);
restHighLevelClient = new CustomRestClient(restClient);

doAnswer(mock -> mockPerformRequest((Header) mock.getArguments()[4]))
.when(restClient)
.performRequest(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), anyObject(), anyVararg());

doAnswer(mock -> mockPerformRequestAsync((Header) mock.getArguments()[5], (ResponseListener) mock.getArguments()[4]))
.when(restClient)
.performRequestAsync(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class),
any(HttpEntity.class), any(ResponseListener.class), anyVararg());
}
}

public void testCustomEndpoint() throws IOException {
final MainRequest request = new MainRequest();
final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10));

MainResponse response = execute(request, restHighLevelClient::custom, restHighLevelClient::customAsync, header);
assertEquals(header.getValue(), response.getNodeName());

response = execute(request, restHighLevelClient::customAndParse, restHighLevelClient::customAndParseAsync, header);
assertEquals(header.getValue(), response.getNodeName());
}

/**
 * The {@link RestHighLevelClient} must declare the following execution methods using the <code>protected</code> modifier
 * so that they can be used by subclasses to implement custom logic.
 */
@SuppressForbidden(reason = "We're forced to use Class#getDeclaredMethods() here because this test checks protected methods")
public void testMethodsVisibility() throws ClassNotFoundException {
String[] methodNames = new String[]{"performRequest", "performRequestAndParseEntity", "performRequestAsync",
"performRequestAsyncAndParseEntity"};
for (String methodName : methodNames) {
boolean found = false;
for (Method method : RestHighLevelClient.class.getDeclaredMethods()) {
if (method.getName().equals(methodName)) {
assertTrue("Method " + methodName + " must be protected", Modifier.isProtected(method.getModifiers()));
found = true;
}
}
assertTrue("Failed to find method " + methodName, found);
}
}

/**
 * Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Header)} method.
 */
private Void mockPerformRequestAsync(Header httpHeader, ResponseListener responseListener) {
try {
responseListener.onSuccess(mockPerformRequest(httpHeader));
} catch (IOException e) {
responseListener.onFailure(e);
}
return null;
}

/**
 * Mocks the synchronous request execution as if it were executed by Elasticsearch.
 */
private Response mockPerformRequest(Header httpHeader) throws IOException {
ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1);
HttpResponse httpResponse = new BasicHttpResponse(new BasicStatusLine(protocol, 200, "OK"));

MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT, true);
BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef();
httpResponse.setEntity(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON));

RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol);
return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}

/**
 * A custom high level client that provides custom methods to execute a request and get its associated response back.
 */
static class CustomRestClient extends RestHighLevelClient {

private CustomRestClient(RestClient restClient) {
super(restClient);
}

MainResponse custom(MainRequest mainRequest, Header... headers) throws IOException {
return performRequest(mainRequest, this::toRequest, this::toResponse, emptySet(), headers);
}

MainResponse customAndParse(MainRequest mainRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, emptySet(), headers);
}

void customAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... headers) {
performRequestAsync(mainRequest, this::toRequest, this::toResponse, listener, emptySet(), headers);
}

void customAndParseAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, listener, emptySet(), headers);
}

Request toRequest(MainRequest mainRequest) throws IOException {
return new Request(HttpGet.METHOD_NAME, ENDPOINT, emptyMap(), null);
}

MainResponse toResponse(Response response) throws IOException {
return parseEntity(response.getEntity(), MainResponse::fromXContent);
}
}
}
@@ -28,7 +28,9 @@ import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
@@ -40,6 +42,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -714,12 +717,44 @@ public class RequestTests extends ESTestCase {
if (searchSourceBuilder == null) {
assertNull(request.entity);
} else {
BytesReference expectedBytes = XContentHelper.toXContent(searchSourceBuilder, XContentType.JSON, false);
assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue());
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(request.entity)));
assertToXContentBody(searchSourceBuilder, request.entity);
}
}

public void testSearchScroll() throws IOException {
SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
searchScrollRequest.scrollId(randomAlphaOfLengthBetween(5, 10));
if (randomBoolean()) {
searchScrollRequest.scroll(randomPositiveTimeValue());
}
Request request = Request.searchScroll(searchScrollRequest);
assertEquals("GET", request.method);
assertEquals("/_search/scroll", request.endpoint);
assertEquals(0, request.params.size());
assertToXContentBody(searchScrollRequest, request.entity);
assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaType(), request.entity.getContentType().getValue());
}

public void testClearScroll() throws IOException {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
int numScrolls = randomIntBetween(1, 10);
for (int i = 0; i < numScrolls; i++) {
clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10));
}
Request request = Request.clearScroll(clearScrollRequest);
assertEquals("DELETE", request.method);
assertEquals("/_search/scroll", request.endpoint);
assertEquals(0, request.params.size());
assertToXContentBody(clearScrollRequest, request.entity);
assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaType(), request.entity.getContentType().getValue());
}

private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, Request.REQUEST_BODY_CONTENT_TYPE, false);
assertEquals(XContentType.JSON.mediaType(), actualEntity.getContentType().getValue());
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
}

public void testParams() {
final int nbParams = randomIntBetween(0, 10);
Request.Params params = Request.Params.builder();
@@ -33,6 +33,7 @@ import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -41,21 +42,28 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchResponseSections;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.ArgumentMatcher;
import org.mockito.Matchers;
import org.mockito.internal.matchers.ArrayEquals;
import org.mockito.internal.matchers.VarargMatcher;

@@ -68,6 +76,7 @@ import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.client.RestClientTestUtil.randomHeaders;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
@@ -76,6 +85,8 @@ import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNotNull;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -95,49 +106,83 @@ public class RestHighLevelClientTests extends ESTestCase {
}

public void testPingSuccessful() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
Header[] headers = randomHeaders(random(), "Header");
Response response = mock(Response.class);
when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
assertTrue(restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

public void testPing404NotFound() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
Header[] headers = randomHeaders(random(), "Header");
Response response = mock(Response.class);
when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
assertFalse(restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

public void testPingSocketTimeout() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
Header[] headers = randomHeaders(random(), "Header");
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(new SocketTimeoutException());
expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

public void testInfo() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
Response response = mock(Response.class);
Header[] headers = randomHeaders(random(), "Header");
MainResponse testInfo = new MainResponse("nodeName", Version.CURRENT, new ClusterName("clusterName"), "clusterUuid",
Build.CURRENT, true);
when(response.getEntity()).thenReturn(
new StringEntity(toXContent(testInfo, XContentType.JSON, false).utf8ToString(), ContentType.APPLICATION_JSON));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
mockResponse(testInfo);
MainResponse receivedInfo = restHighLevelClient.info(headers);
assertEquals(testInfo, receivedInfo);
verify(restClient).performRequest(eq("GET"), eq("/"), eq(Collections.emptyMap()),
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

public void testSearchScroll() throws IOException {
Header[] headers = randomHeaders(random(), "Header");
SearchResponse mockSearchResponse = new SearchResponse(new SearchResponseSections(SearchHits.empty(), InternalAggregations.EMPTY,
null, false, false, null, 1), randomAlphaOfLengthBetween(5, 10), 5, 5, 100, new ShardSearchFailure[0]);
mockResponse(mockSearchResponse);
SearchResponse searchResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)),
headers);
assertEquals(mockSearchResponse.getScrollId(), searchResponse.getScrollId());
assertEquals(0, searchResponse.getHits().totalHits);
assertEquals(5, searchResponse.getTotalShards());
assertEquals(5, searchResponse.getSuccessfulShards());
assertEquals(100, searchResponse.getTook().getMillis());
verify(restClient).performRequest(eq("GET"), eq("/_search/scroll"), eq(Collections.emptyMap()),
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

public void testClearScroll() throws IOException {
Header[] headers = randomHeaders(random(), "Header");
ClearScrollResponse mockClearScrollResponse = new ClearScrollResponse(randomBoolean(), randomIntBetween(0, Integer.MAX_VALUE));
mockResponse(mockClearScrollResponse);
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10));
ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers);
assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded());
assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed());
verify(restClient).performRequest(eq("DELETE"), eq("/_search/scroll"), eq(Collections.emptyMap()),
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}

private void mockResponse(ToXContent toXContent) throws IOException {
Response response = mock(Response.class);
ContentType contentType = ContentType.parse(Request.REQUEST_BODY_CONTENT_TYPE.mediaType());
String requestBody = toXContent(toXContent, Request.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString();
when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
}

public void testRequestValidation() {
@ -19,11 +19,19 @@

package org.elasticsearch.client;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.join.aggregations.Children;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;

@ -37,6 +45,7 @@ import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;

@ -46,10 +55,14 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;

public class SearchIT extends ESRestHighLevelClientTestCase {

@ -161,7 +174,6 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
        assertSearchHeader(searchResponse);
        assertNull(searchResponse.getSuggest());
        assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
        assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
        assertEquals(5, searchResponse.getHits().totalHits);
        assertEquals(0, searchResponse.getHits().getHits().length);
        assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);

@ -244,7 +256,6 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
        assertSearchHeader(searchResponse);
        assertNull(searchResponse.getSuggest());
        assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
        assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
        assertEquals(5, searchResponse.getHits().totalHits);
        assertEquals(0, searchResponse.getHits().getHits().length);
        assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);

@ -324,7 +335,6 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
        assertSearchHeader(searchResponse);
        assertNull(searchResponse.getSuggest());
        assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
        assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
        assertEquals(3, searchResponse.getHits().totalHits);
        assertEquals(0, searchResponse.getHits().getHits().length);
        assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);

@ -385,8 +395,67 @@
        }
    }

    public void testSearchScroll() throws Exception {

        for (int i = 0; i < 100; i++) {
            XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject();
            HttpEntity entity = new NStringEntity(builder.string(), ContentType.APPLICATION_JSON);
            client().performRequest("PUT", "test/type1/" + Integer.toString(i), Collections.emptyMap(), entity);
        }
        client().performRequest("POST", "/test/_refresh");

        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC);
        SearchRequest searchRequest = new SearchRequest("test").scroll(TimeValue.timeValueMinutes(2)).source(searchSourceBuilder);
        SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);

        try {
            long counter = 0;
            assertSearchHeader(searchResponse);
            assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
            assertThat(searchResponse.getHits().getHits().length, equalTo(35));
            for (SearchHit hit : searchResponse.getHits()) {
                assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++));
            }

            searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)),
                    highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync);

            assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
            assertThat(searchResponse.getHits().getHits().length, equalTo(35));
            for (SearchHit hit : searchResponse.getHits()) {
                assertEquals(counter++, ((Number) hit.getSortValues()[0]).longValue());
            }

            searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)),
                    highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync);

            assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
            assertThat(searchResponse.getHits().getHits().length, equalTo(30));
            for (SearchHit hit : searchResponse.getHits()) {
                assertEquals(counter++, ((Number) hit.getSortValues()[0]).longValue());
            }
        } finally {
            ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
            clearScrollRequest.addScrollId(searchResponse.getScrollId());
            ClearScrollResponse clearScrollResponse = execute(clearScrollRequest,
                    // Not using a method reference to work around https://bugs.eclipse.org/bugs/show_bug.cgi?id=517951
                    (request, headers) -> highLevelClient().clearScroll(request, headers),
                    (request, listener, headers) -> highLevelClient().clearScrollAsync(request, listener, headers));
            assertThat(clearScrollResponse.getNumFreed(), greaterThan(0));
            assertTrue(clearScrollResponse.isSucceeded());

            SearchScrollRequest scrollRequest = new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2));
            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> execute(scrollRequest,
                    highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync));
            assertEquals(RestStatus.NOT_FOUND, exception.status());
            assertThat(exception.getRootCause(), instanceOf(ElasticsearchException.class));
            ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause();
            assertThat(rootCause.getMessage(), containsString("No search context found for"));
        }
    }

    private static void assertSearchHeader(SearchResponse searchResponse) {
        assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
        assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
        assertEquals(0, searchResponse.getFailedShards());
        assertThat(searchResponse.getTotalShards(), greaterThan(0));
        assertEquals(searchResponse.getTotalShards(), searchResponse.getSuccessfulShards());
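The scroll integration test above doubles as a usage reference; condensed into application code against an already initialized RestHighLevelClient (the client variable, index and field names are assumed), the same lifecycle looks roughly like this:

    SearchRequest searchRequest = new SearchRequest("test")
            .scroll(TimeValue.timeValueMinutes(2))
            .source(new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC));
    SearchResponse response = client.search(searchRequest);
    String scrollId = response.getScrollId();
    while (response.getHits().getHits().length > 0) {
        // consume response.getHits() ...
        response = client.searchScroll(new SearchScrollRequest(scrollId).scroll(TimeValue.timeValueMinutes(2)));
        scrollId = response.getScrollId();
    }
    ClearScrollRequest clearScroll = new ClearScrollRequest();
    clearScroll.addScrollId(scrollId);
    client.clearScroll(clearScroll);   // frees the server-side search context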
@ -81,7 +81,7 @@ dependencies {
  compile "com.vividsolutions:jts:${versions.jts}", optional

  // logging
  compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
  compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
  compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
  // to bridge dependencies that are still on Log4j 1 to Log4j 2
  compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional

@ -118,7 +118,6 @@ compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-tr
forbiddenPatterns {
  exclude '**/*.json'
  exclude '**/*.jmx'
  exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}

task generateModulesList {
@ -1,129 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.analysis;
|
||||
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
|
||||
/**
|
||||
* This {@link Analyzer} wraps another analyzer and adds a set of prefixes to the
|
||||
* underlying TokenStream. While these prefixes are iterated the position attribute
|
||||
* will not be incremented. Also each prefix will be separated from the other tokens
|
||||
* by a separator character.
|
||||
* NOTE: The sequence of prefixes needs to be not empty
|
||||
*/
|
||||
public class PrefixAnalyzer extends Analyzer {
|
||||
|
||||
private final char separator;
|
||||
private final Iterable<? extends CharSequence> prefix;
|
||||
private final Analyzer analyzer;
|
||||
|
||||
/**
|
||||
* Create a new {@link PrefixAnalyzer}. The separator will be set to the DEFAULT_SEPARATOR.
|
||||
*
|
||||
* @param analyzer {@link Analyzer} to wrap
|
||||
* @param prefix Single prefix
|
||||
*/
|
||||
public PrefixAnalyzer(Analyzer analyzer, char separator, CharSequence prefix) {
|
||||
this(analyzer, separator, Collections.singleton(prefix));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new {@link PrefixAnalyzer}. The separator will be set to the DEFAULT_SEPARATOR.
|
||||
*
|
||||
* @param analyzer {@link Analyzer} to wrap
|
||||
* @param prefix {@link Iterable} of {@link CharSequence} which keeps all prefixes
|
||||
*/
|
||||
public PrefixAnalyzer(Analyzer analyzer, char separator, Iterable<? extends CharSequence> prefix) {
|
||||
super();
|
||||
this.analyzer = analyzer;
|
||||
this.prefix = prefix;
|
||||
this.separator = separator;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName) {
|
||||
TokenStreamComponents createComponents = analyzer.createComponents(fieldName);
|
||||
TokenStream stream = new PrefixTokenFilter(createComponents.getTokenStream(), separator, prefix);
|
||||
TokenStreamComponents tsc = new TokenStreamComponents(createComponents.getTokenizer(), stream);
|
||||
return tsc;
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@link PrefixTokenFilter} wraps a {@link TokenStream} and adds a set
|
||||
* prefixes ahead. The position attribute will not be incremented for the prefixes.
|
||||
*/
|
||||
public static final class PrefixTokenFilter extends TokenFilter {
|
||||
|
||||
private final char separator;
|
||||
private final CharTermAttribute termAttr = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);
|
||||
private final Iterable<? extends CharSequence> prefixes;
|
||||
|
||||
private Iterator<? extends CharSequence> currentPrefix;
|
||||
|
||||
/**
|
||||
* Create a new {@link PrefixTokenFilter}. The separator will be set to the DEFAULT_SEPARATOR.
|
||||
*
|
||||
* @param input {@link TokenStream} to wrap
|
||||
* @param separator Character used separate prefixes from other tokens
|
||||
* @param prefixes {@link Iterable} of {@link CharSequence} which keeps all prefixes
|
||||
*/
|
||||
public PrefixTokenFilter(TokenStream input, char separator, Iterable<? extends CharSequence> prefixes) {
|
||||
super(input);
|
||||
this.prefixes = prefixes;
|
||||
this.currentPrefix = null;
|
||||
this.separator = separator;
|
||||
if (prefixes == null || !prefixes.iterator().hasNext()) {
|
||||
throw new IllegalArgumentException("one or more prefixes needed");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
if (currentPrefix != null) {
|
||||
if (!currentPrefix.hasNext()) {
|
||||
return input.incrementToken();
|
||||
} else {
|
||||
posAttr.setPositionIncrement(0);
|
||||
}
|
||||
} else {
|
||||
currentPrefix = prefixes.iterator();
|
||||
termAttr.setEmpty();
|
||||
posAttr.setPositionIncrement(1);
|
||||
assert (currentPrefix.hasNext()) : "one or more prefixes needed";
|
||||
}
|
||||
termAttr.setEmpty();
|
||||
termAttr.append(currentPrefix.next());
|
||||
termAttr.append(separator);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reset() throws IOException {
|
||||
super.reset();
|
||||
currentPrefix = null;
|
||||
}
|
||||
}
|
||||
}
|
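The removed PrefixAnalyzer/PrefixTokenFilter prepended each configured prefix plus the separator as extra tokens at the same position before the wrapped analyzer's output. A sketch of the behaviour it implemented, with an assumed KeywordAnalyzer delegate and made-up prefixes (IOException handling elided):

    Analyzer wrapped = new org.apache.lucene.analysis.core.KeywordAnalyzer();
    Analyzer prefixed = new PrefixAnalyzer(wrapped, '|', Arrays.asList("a", "b"));
    try (TokenStream stream = prefixed.tokenStream("field", "foo")) {
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString());   // "a|", then "b|" at the same position, then "foo"
        }
        stream.end();
    }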
|
@ -296,27 +296,6 @@ public abstract class BlendedTermQuery extends Query {
        return Objects.hash(classHash(), Arrays.hashCode(equalsTerms()));
    }

    public static BlendedTermQuery booleanBlendedQuery(Term[] terms) {
        return booleanBlendedQuery(terms, null);
    }

    public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final float[] boosts) {
        return new BlendedTermQuery(terms, boosts) {
            @Override
            protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
                BooleanQuery.Builder booleanQueryBuilder = new BooleanQuery.Builder();
                for (int i = 0; i < terms.length; i++) {
                    Query query = new TermQuery(terms[i], ctx[i]);
                    if (boosts != null && boosts[i] != 1f) {
                        query = new BoostQuery(query, boosts[i]);
                    }
                    booleanQueryBuilder.add(query, BooleanClause.Occur.SHOULD);
                }
                return booleanQueryBuilder.build();
            }
        };
    }

    public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) {
        return new BlendedTermQuery(terms, boosts) {
            @Override
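For reference, the boolean combination the removed booleanBlendedQuery factory produced can be written out directly; the field, terms and boosts below are made up for illustration:

    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    Term[] terms = { new Term("body", "quick"), new Term("body", "fox") };
    float[] boosts = { 1.0f, 2.0f };
    for (int i = 0; i < terms.length; i++) {
        Query query = new TermQuery(terms[i]);
        if (boosts[i] != 1f) {
            query = new BoostQuery(query, boosts[i]);
        }
        builder.add(query, BooleanClause.Occur.SHOULD);   // optional clause, scores are summed
    }
    Query combined = builder.build();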
@ -66,46 +66,54 @@ public final class MinDocQuery extends Query {
                    return null;
                }
                final int segmentMinDoc = Math.max(0, minDoc - context.docBase);
                final DocIdSetIterator disi = new DocIdSetIterator() {

                    int doc = -1;

                    @Override
                    public int docID() {
                        return doc;
                    }

                    @Override
                    public int nextDoc() throws IOException {
                        return advance(doc + 1);
                    }

                    @Override
                    public int advance(int target) throws IOException {
                        assert target > doc;
                        if (doc == -1) {
                            // skip directly to minDoc
                            doc = Math.max(target, segmentMinDoc);
                        } else {
                            doc = target;
                        }
                        if (doc >= maxDoc) {
                            doc = NO_MORE_DOCS;
                        }
                        return doc;
                    }

                    @Override
                    public long cost() {
                        return maxDoc - segmentMinDoc;
                    }

                };
                final DocIdSetIterator disi = new MinDocIterator(segmentMinDoc, maxDoc);
                return new ConstantScoreScorer(this, score(), disi);
            }
        };
    }

    static class MinDocIterator extends DocIdSetIterator {
        final int segmentMinDoc;
        final int maxDoc;
        int doc = -1;

        MinDocIterator(int segmentMinDoc, int maxDoc) {
            this.segmentMinDoc = segmentMinDoc;
            this.maxDoc = maxDoc;
        }

        @Override
        public int docID() {
            return doc;
        }

        @Override
        public int nextDoc() throws IOException {
            return advance(doc + 1);
        }

        @Override
        public int advance(int target) throws IOException {
            assert target > doc;
            if (doc == -1) {
                // skip directly to minDoc
                doc = Math.max(target, segmentMinDoc);
            } else {
                doc = target;
            }
            if (doc >= maxDoc) {
                doc = NO_MORE_DOCS;
            }
            return doc;
        }

        @Override
        public long cost() {
            return maxDoc - segmentMinDoc;
        }
    }


    @Override
    public String toString(String field) {
        return "MinDocQuery(minDoc=" + minDoc + ")";
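The refactoring above only extracts the anonymous iterator into the reusable MinDocIterator; per segment it simply walks every doc id from segmentMinDoc up to maxDoc. A tiny sketch of what it yields (numbers made up, package-private access and IOException handling assumed):

    DocIdSetIterator it = new MinDocQuery.MinDocIterator(5, 10);   // segmentMinDoc = 5, maxDoc = 10
    int doc;
    while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        System.out.println(doc);   // prints 5, 6, 7, 8, 9
    }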
@ -0,0 +1,165 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.queries;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.search.ConstantScoreScorer;
|
||||
import org.apache.lucene.search.ConstantScoreWeight;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.search.EarlyTerminatingSortingCollector;
|
||||
import org.apache.lucene.search.FieldComparator;
|
||||
import org.apache.lucene.search.FieldDoc;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.LeafFieldComparator;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.Weight;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A {@link Query} that only matches documents that are greater than the provided {@link FieldDoc}.
|
||||
* This works only if the index is sorted according to the given search {@link Sort}.
|
||||
*/
|
||||
public class SearchAfterSortedDocQuery extends Query {
|
||||
private final Sort sort;
|
||||
private final FieldDoc after;
|
||||
private final FieldComparator<?>[] fieldComparators;
|
||||
private final int[] reverseMuls;
|
||||
|
||||
public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) {
|
||||
if (sort.getSort().length != after.fields.length) {
|
||||
throw new IllegalArgumentException("after doc has " + after.fields.length + " value(s) but sort has "
|
||||
+ sort.getSort().length + ".");
|
||||
}
|
||||
this.sort = sort;
|
||||
this.after = after;
|
||||
int numFields = sort.getSort().length;
|
||||
this.fieldComparators = new FieldComparator[numFields];
|
||||
this.reverseMuls = new int[numFields];
|
||||
for (int i = 0; i < numFields; i++) {
|
||||
SortField sortField = sort.getSort()[i];
|
||||
FieldComparator<?> fieldComparator = sortField.getComparator(1, i);
|
||||
@SuppressWarnings("unchecked")
|
||||
FieldComparator<Object> comparator = (FieldComparator<Object>) fieldComparator;
|
||||
comparator.setTopValue(after.fields[i]);
|
||||
fieldComparators[i] = fieldComparator;
|
||||
reverseMuls[i] = sortField.getReverse() ? -1 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
|
||||
return new ConstantScoreWeight(this, 1.0f) {
|
||||
@Override
|
||||
public Scorer scorer(LeafReaderContext context) throws IOException {
|
||||
Sort segmentSort = context.reader().getMetaData().getSort();
|
||||
if (EarlyTerminatingSortingCollector.canEarlyTerminate(sort, segmentSort) == false) {
|
||||
throw new IOException("search sort :[" + sort.getSort() + "] does not match the index sort:[" + segmentSort + "]");
|
||||
}
|
||||
final int afterDoc = after.doc - context.docBase;
|
||||
TopComparator comparator= getTopComparator(fieldComparators, reverseMuls, context, afterDoc);
|
||||
final int maxDoc = context.reader().maxDoc();
|
||||
final int firstDoc = searchAfterDoc(comparator, 0, context.reader().maxDoc());
|
||||
if (firstDoc >= maxDoc) {
|
||||
return null;
|
||||
}
|
||||
final DocIdSetIterator disi = new MinDocQuery.MinDocIterator(firstDoc, maxDoc);
|
||||
return new ConstantScoreScorer(this, score(), disi);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString(String field) {
|
||||
return "SearchAfterSortedDocQuery(sort=" + sort + ", afterDoc=" + after.toString() + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
return sameClassAs(other) &&
|
||||
equalsTo(getClass().cast(other));
|
||||
}
|
||||
|
||||
private boolean equalsTo(SearchAfterSortedDocQuery other) {
|
||||
return sort.equals(other.sort) &&
|
||||
after.doc == other.after.doc &&
|
||||
Double.compare(after.score, other.after.score) == 0 &&
|
||||
Arrays.equals(after.fields, other.after.fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(classHash(), sort, after.doc, after.score, Arrays.hashCode(after.fields));
|
||||
}
|
||||
|
||||
interface TopComparator {
|
||||
boolean lessThanTop(int doc) throws IOException;
|
||||
}
|
||||
|
||||
static TopComparator getTopComparator(FieldComparator<?>[] fieldComparators,
|
||||
int[] reverseMuls,
|
||||
LeafReaderContext leafReaderContext,
|
||||
int topDoc) {
|
||||
return doc -> {
|
||||
// DVs use forward iterators so we recreate the iterator for each sort field
|
||||
// every time we need to compare a document with the <code>after<code> doc.
|
||||
// We could reuse the iterators when the comparison goes forward but
|
||||
// this should only be called a few time per segment (binary search).
|
||||
for (int i = 0; i < fieldComparators.length; i++) {
|
||||
LeafFieldComparator comparator = fieldComparators[i].getLeafComparator(leafReaderContext);
|
||||
int value = reverseMuls[i] * comparator.compareTop(doc);
|
||||
if (value != 0) {
|
||||
return value < 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (topDoc <= doc) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the first doc id greater than the provided <code>after</code> doc.
|
||||
*/
|
||||
static int searchAfterDoc(TopComparator comparator, int from, int to) throws IOException {
|
||||
int low = from;
|
||||
int high = to - 1;
|
||||
|
||||
while (low <= high) {
|
||||
int mid = (low + high) >>> 1;
|
||||
if (comparator.lessThanTop(mid)) {
|
||||
high = mid - 1;
|
||||
} else {
|
||||
low = mid + 1;
|
||||
}
|
||||
}
|
||||
return low;
|
||||
}
|
||||
|
||||
}
|
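SearchAfterSortedDocQuery above only matches documents that sort after the given FieldDoc, and it relies on the segments being sorted by the same Sort. A hedged construction sketch (field name, sort values and the surrounding boolean query are assumed):

    Sort indexSort = new Sort(new SortField("timestamp", SortField.Type.LONG));
    FieldDoc after = new FieldDoc(42, Float.NaN, new Object[] { 1496842512000L });
    Query resumeAfter = new SearchAfterSortedDocQuery(indexSort, after);
    // typically added as a FILTER clause next to the user query so scoring is unaffected:
    // new BooleanQuery.Builder()
    //         .add(userQuery, BooleanClause.Occur.MUST)
    //         .add(resumeAfter, BooleanClause.Occur.FILTER)
    //         .build();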
|
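searchAfterDoc above is a lower-bound binary search over the segment's doc ids: it returns the first doc for which lessThanTop flips to true. The same pattern on a plain sorted array, as a worked example:

    // find the first index whose value is strictly greater than target (array length if none is)
    static int firstGreaterThan(long[] sorted, long target) {
        int low = 0;
        int high = sorted.length - 1;
        while (low <= high) {
            int mid = (low + high) >>> 1;
            if (sorted[mid] > target) {        // plays the role of comparator.lessThanTop(mid)
                high = mid - 1;
            } else {
                low = mid + 1;
            }
        }
        return low;
    }
    // firstGreaterThan(new long[] {1, 3, 3, 7, 9}, 3) == 3, i.e. the index of value 7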
@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
|||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.search.DisjunctionMaxQuery;
|
||||
import org.apache.lucene.search.FuzzyQuery;
|
||||
|
@ -155,31 +156,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
// if there is no match in the mappings.
|
||||
return new MatchNoDocsQuery("empty fields");
|
||||
}
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getFieldQuerySingle(mField, queryText, quoted);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getFieldQuerySingle(mField, queryText, quoted);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getFieldQuerySingle(mField, queryText, quoted);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return getFieldQuerySingle(field, queryText, quoted);
|
||||
}
|
||||
|
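The rewrite above drops the separate boolean-query branch: when useDisMax() is off the per-field queries are still combined with a DisjunctionMaxQuery, just with a tie breaker of 1.0. With tie_breaker = 1.0 a dismax scores max(sub) + 1.0 * (sum of the others), i.e. the same sum over sub-queries that the boolean SHOULD branch produced, so the two branches were redundant. A small illustration (fields and terms assumed):

    List<Query> perField = Arrays.asList(
            new TermQuery(new Term("title", "quick")),
            new TermQuery(new Term("body", "quick")));
    Query bestFieldOnly = new DisjunctionMaxQuery(perField, 0.0f);   // pure dis_max scoring
    Query sumOfFields = new DisjunctionMaxQuery(perField, 1.0f);     // scores like a boolean SHOULD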
@ -255,33 +245,21 @@ public class MapperQueryParser extends QueryParser {
|
|||
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
|
||||
Collection<String> fields = extractMultiFields(field);
|
||||
if (fields != null) {
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = super.getFieldQuery(mField, queryText, slop);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
q = applySlop(q, slop);
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = super.getFieldQuery(mField, queryText, slop);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
q = applySlop(q, slop);
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = super.getFieldQuery(mField, queryText, slop);
|
||||
if (q != null) {
|
||||
q = applySlop(q, slop);
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return super.getFieldQuery(field, queryText, slop);
|
||||
}
|
||||
|
@ -308,31 +286,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive, context);
|
||||
}
|
||||
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
}
|
||||
|
||||
private Query getRangeQuerySingle(String field, String part1, String part2,
|
||||
|
@ -367,30 +334,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
if (fields.size() == 1) {
|
||||
return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity);
|
||||
}
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return getFuzzyQuerySingle(field, termStr, minSimilarity);
|
||||
}
|
||||
|
@ -430,31 +387,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
if (fields.size() == 1) {
|
||||
return getPrefixQuerySingle(fields.iterator().next(), termStr);
|
||||
}
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getPrefixQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getPrefixQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getPrefixQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return getPrefixQuerySingle(field, termStr);
|
||||
}
|
||||
|
@ -592,31 +538,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
if (fields.size() == 1) {
|
||||
return getWildcardQuerySingle(fields.iterator().next(), termStr);
|
||||
}
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getWildcardQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getWildcardQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getWildcardQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return getWildcardQuerySingle(field, termStr);
|
||||
}
|
||||
|
@ -656,31 +591,20 @@ public class MapperQueryParser extends QueryParser {
|
|||
if (fields.size() == 1) {
|
||||
return getRegexpQuerySingle(fields.iterator().next(), termStr);
|
||||
}
|
||||
if (settings.useDisMax()) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getRegexpQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f;
|
||||
List<Query> queries = new ArrayList<>();
|
||||
boolean added = false;
|
||||
for (String mField : fields) {
|
||||
Query q = getRegexpQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
added = true;
|
||||
queries.add(applyBoost(mField, q));
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, settings.tieBreaker());
|
||||
} else {
|
||||
List<BooleanClause> clauses = new ArrayList<>();
|
||||
for (String mField : fields) {
|
||||
Query q = getRegexpQuerySingle(mField, termStr);
|
||||
if (q != null) {
|
||||
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
}
|
||||
if (clauses.isEmpty()) return null; // happens for stopwords
|
||||
return getBooleanQuery(clauses);
|
||||
}
|
||||
if (!added) {
|
||||
return null;
|
||||
}
|
||||
return new DisjunctionMaxQuery(queries, tiebreaker);
|
||||
} else {
|
||||
return getRegexpQuerySingle(field, termStr);
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ import java.util.Collection;
abstract class CollapsingDocValuesSource<T> extends GroupSelector<T> {
    protected final String field;

    CollapsingDocValuesSource(String field) throws IOException {
    CollapsingDocValuesSource(String field) {
        this.field = field;
    }

@ -58,7 +58,7 @@ abstract class CollapsingDocValuesSource<T> extends GroupSelector<T> {
        private long value;
        private boolean hasValue;

        Numeric(String field) throws IOException {
        Numeric(String field) {
            super(field);
        }

@ -148,7 +148,7 @@ abstract class CollapsingDocValuesSource<T> extends GroupSelector<T> {
        private SortedDocValues values;
        private int ord;

        Keyword(String field) throws IOException {
        Keyword(String field) {
            super(field);
        }

@ -46,7 +46,7 @@ public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollec
    private final boolean trackMaxScore;

    CollapsingTopDocsCollector(GroupSelector<T> groupSelector, String collapseField, Sort sort,
                               int topN, boolean trackMaxScore) throws IOException {
                               int topN, boolean trackMaxScore) {
        super(groupSelector, sort, topN);
        this.collapseField = collapseField;
        this.trackMaxScore = trackMaxScore;

@ -60,7 +60,7 @@ public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollec

    /**
     * Transform {@link FirstPassGroupingCollector#getTopGroups(int, boolean)} output in
     * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can create the final top docs at the end
     * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can get the final top docs at the end
     * of the first pass.
     */
    public CollapseTopFieldDocs getTopDocs() throws IOException {

@ -132,10 +132,9 @@ public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollec
     * This must be non-null, ie, if you want to groupSort by relevance
     * use Sort.RELEVANCE.
     * @param topN How many top groups to keep.
     * @throws IOException When I/O related errors occur
     */
    public static CollapsingTopDocsCollector<?> createNumeric(String collapseField, Sort sort,
                                                               int topN, boolean trackMaxScore) throws IOException {
                                                               int topN, boolean trackMaxScore) {
        return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseField),
            collapseField, sort, topN, trackMaxScore);
    }

@ -152,12 +151,10 @@ public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollec
     * document per collapsed key.
     * This must be non-null, ie, if you want to groupSort by relevance use Sort.RELEVANCE.
     * @param topN How many top groups to keep.
     * @throws IOException When I/O related errors occur
     */
    public static CollapsingTopDocsCollector<?> createKeyword(String collapseField, Sort sort,
                                                               int topN, boolean trackMaxScore) throws IOException {
                                                               int topN, boolean trackMaxScore) {
        return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseField),
            collapseField, sort, topN, trackMaxScore);
    }
}

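With the IOException declarations removed from the factories above, building and running a collapsing collector reduces to the following sketch (collapse field, sizes, and the searcher/query are assumed):

    CollapsingTopDocsCollector<?> collector =
            CollapsingTopDocsCollector.createKeyword("user_id", Sort.RELEVANCE, 10, false);
    searcher.search(query, collector);
    CollapseTopFieldDocs topDocs = collector.getTopDocs();   // at most one entry per collapsed key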
@ -1,82 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.search.postingshighlight;
|
||||
|
||||
import org.apache.lucene.search.highlight.Snippet;
|
||||
import org.apache.lucene.search.highlight.Encoder;
|
||||
import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils;
|
||||
|
||||
/**
|
||||
Custom passage formatter that allows us to:
|
||||
1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet})
|
||||
2) use the {@link Encoder} implementations that are already used with the other highlighters
|
||||
*/
|
||||
public class CustomPassageFormatter extends PassageFormatter {
|
||||
|
||||
private final String preTag;
|
||||
private final String postTag;
|
||||
private final Encoder encoder;
|
||||
|
||||
public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) {
|
||||
this.preTag = preTag;
|
||||
this.postTag = postTag;
|
||||
this.encoder = encoder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Snippet[] format(Passage[] passages, String content) {
|
||||
Snippet[] snippets = new Snippet[passages.length];
|
||||
int pos;
|
||||
for (int j = 0; j < passages.length; j++) {
|
||||
Passage passage = passages[j];
|
||||
StringBuilder sb = new StringBuilder();
|
||||
pos = passage.getStartOffset();
|
||||
for (int i = 0; i < passage.getNumMatches(); i++) {
|
||||
int start = passage.getMatchStarts()[i];
|
||||
int end = passage.getMatchEnds()[i];
|
||||
// its possible to have overlapping terms
|
||||
if (start > pos) {
|
||||
append(sb, content, pos, start);
|
||||
}
|
||||
if (end > pos) {
|
||||
sb.append(preTag);
|
||||
append(sb, content, Math.max(pos, start), end);
|
||||
sb.append(postTag);
|
||||
pos = end;
|
||||
}
|
||||
}
|
||||
// its possible a "term" from the analyzer could span a sentence boundary.
|
||||
append(sb, content, pos, Math.max(pos, passage.getEndOffset()));
|
||||
//we remove the paragraph separator if present at the end of the snippet (we used it as separator between values)
|
||||
if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) {
|
||||
sb.deleteCharAt(sb.length() - 1);
|
||||
} else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) {
|
||||
sb.deleteCharAt(sb.length() - 1);
|
||||
}
|
||||
//and we trim the snippets too
|
||||
snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0);
|
||||
}
|
||||
return snippets;
|
||||
}
|
||||
|
||||
protected void append(StringBuilder dest, String content, int start, int end) {
|
||||
dest.append(encoder.encodeText(content.substring(start, end)));
|
||||
}
|
||||
}
|
|
@ -1,138 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.search.postingshighlight;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.highlight.Snippet;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.text.BreakIterator;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Subclass of the {@link PostingsHighlighter} that works for a single field in a single document.
|
||||
* Uses a custom {@link PassageFormatter}. Accepts field content as a constructor argument, given that loading
|
||||
* is custom and can be done reading from _source field. Supports using different {@link BreakIterator} to break
|
||||
* the text into fragments. Considers every distinct field value as a discrete passage for highlighting (unless
|
||||
* the whole content needs to be highlighted). Supports both returning empty snippets and non highlighted snippets
|
||||
* when no highlighting can be performed.
|
||||
*
|
||||
* The use that we make of the postings highlighter is not optimal. It would be much better to highlight
|
||||
* multiple docs in a single call, as we actually lose its sequential IO. That would require to
|
||||
* refactor the elasticsearch highlight api which currently works per hit.
|
||||
*/
|
||||
public final class CustomPostingsHighlighter extends PostingsHighlighter {
|
||||
|
||||
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
|
||||
private static final Passage[] EMPTY_PASSAGE = new Passage[0];
|
||||
|
||||
private final Analyzer analyzer;
|
||||
private final CustomPassageFormatter passageFormatter;
|
||||
private final BreakIterator breakIterator;
|
||||
private final boolean returnNonHighlightedSnippets;
|
||||
private final String fieldValue;
|
||||
|
||||
/**
|
||||
* Creates a new instance of {@link CustomPostingsHighlighter}
|
||||
*
|
||||
* @param analyzer the analyzer used for the field at index time, used for multi term queries internally
|
||||
* @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects
|
||||
* @param fieldValue the original field values as constructor argument, loaded from te _source field or the relevant stored field.
|
||||
* @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when
|
||||
* no highlighting can be performed
|
||||
*/
|
||||
public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, String fieldValue, boolean returnNonHighlightedSnippets) {
|
||||
this(analyzer, passageFormatter, null, fieldValue, returnNonHighlightedSnippets);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new instance of {@link CustomPostingsHighlighter}
|
||||
*
|
||||
* @param analyzer the analyzer used for the field at index time, used for multi term queries internally
|
||||
* @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects
|
||||
* @param breakIterator an instance {@link BreakIterator} selected depending on the highlighting options
|
||||
* @param fieldValue the original field values as constructor argument, loaded from te _source field or the relevant stored field.
|
||||
* @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when
|
||||
* no highlighting can be performed
|
||||
*/
|
||||
public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, BreakIterator breakIterator, String fieldValue, boolean returnNonHighlightedSnippets) {
|
||||
this.analyzer = analyzer;
|
||||
this.passageFormatter = passageFormatter;
|
||||
this.breakIterator = breakIterator;
|
||||
this.returnNonHighlightedSnippets = returnNonHighlightedSnippets;
|
||||
this.fieldValue = fieldValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Highlights terms extracted from the provided query within the content of the provided field name
|
||||
*/
|
||||
public Snippet[] highlightField(String field, Query query, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
|
||||
Map<String, Object[]> fieldsAsObjects = super.highlightFieldsAsObjects(new String[]{field}, query, searcher, new int[]{docId}, new int[]{maxPassages});
|
||||
Object[] snippetObjects = fieldsAsObjects.get(field);
|
||||
if (snippetObjects != null) {
|
||||
//one single document at a time
|
||||
assert snippetObjects.length == 1;
|
||||
Object snippetObject = snippetObjects[0];
|
||||
if (snippetObject != null && snippetObject instanceof Snippet[]) {
|
||||
return (Snippet[]) snippetObject;
|
||||
}
|
||||
}
|
||||
return EMPTY_SNIPPET;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PassageFormatter getFormatter(String field) {
|
||||
return passageFormatter;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BreakIterator getBreakIterator(String field) {
|
||||
if (breakIterator == null) {
|
||||
return super.getBreakIterator(field);
|
||||
}
|
||||
return breakIterator;
|
||||
}
|
||||
|
||||
/*
|
||||
By default the postings highlighter returns non highlighted snippet when there are no matches.
|
||||
We want to return no snippets by default, unless no_match_size is greater than 0
|
||||
*/
|
||||
@Override
|
||||
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
|
||||
if (returnNonHighlightedSnippets) {
|
||||
//we want to return the first sentence of the first snippet only
|
||||
return super.getEmptyHighlight(fieldName, bi, 1);
|
||||
}
|
||||
return EMPTY_PASSAGE;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Analyzer getIndexAnalyzer(String field) {
|
||||
return analyzer;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
|
||||
//we only highlight one field, one document at a time
|
||||
return new String[][]{new String[]{fieldValue}};
|
||||
}
|
||||
}
|
|
@ -20,7 +20,6 @@
package org.apache.lucene.search.uhighlight;

import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.Snippet;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils;

/**

@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Snippet;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;

@ -182,13 +181,16 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
                positionSpanQueries[i] = innerQueries[0];
            }
        }

        if (positionSpanQueries.length == 1) {
            return Collections.singletonList(positionSpanQueries[0]);
        }
        // sum position increments beyond 1
        int positionGaps = 0;
        if (positions.length >= 2) {
            // positions are in increasing order. max(0,...) is just a safeguard.
            positionGaps = Math.max(0, positions[positions.length - 1] - positions[0] - positions.length + 1);
        }

        //if original slop is 0 then require inOrder
        boolean inorder = (mpq.getSlop() == 0);
        return Collections.singletonList(new SpanNearQuery(positionSpanQueries,

@ -17,11 +17,11 @@
 * under the License.
 */

package org.apache.lucene.search.highlight;
package org.apache.lucene.search.uhighlight;

/**
 * Represents a scored highlighted snippet.
 * It's our own arbitrary object that we get back from the postings highlighter when highlighting a document.
 * It's our own arbitrary object that we get back from the unified highlighter when highlighting a document.
 * Every snippet contains its formatted text and its score.
 * The score is needed in case we want to sort snippets by score, they get sorted by position in the text by default.
 */
@ -765,8 +765,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                org.elasticsearch.search.SearchContextMissingException::new, 24, UNKNOWN_VERSION_ADDED),
        GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
                org.elasticsearch.script.GeneralScriptException::new, 25, UNKNOWN_VERSION_ADDED),
        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
                org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26, UNKNOWN_VERSION_ADDED),
        // 26 was BatchOperationException
        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
                org.elasticsearch.snapshots.SnapshotCreationException::new, 27, UNKNOWN_VERSION_ADDED),
        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, // deprecated in 6.0, remove in 7.0

@ -830,8 +829,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                org.elasticsearch.transport.SendRequestTransportException::new, 58, UNKNOWN_VERSION_ADDED),
        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
                org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59, UNKNOWN_VERSION_ADDED),
        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
                org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60, UNKNOWN_VERSION_ADDED),
        // 60 used to be for EarlyTerminationException
        // 61 used to be for RoutingValidationException
        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
                org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62, UNKNOWN_VERSION_ADDED),
@ -72,19 +72,26 @@ public class Version implements Comparable<Version> {
|
|||
public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_3_2_ID = 5030299;
|
||||
public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_3_3_ID = 5030399;
|
||||
public static final Version V_5_3_3 = new Version(V_5_3_3_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_4_0_ID = 5040099;
|
||||
public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
|
||||
public static final int V_5_4_1_ID = 5040199;
|
||||
public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
|
||||
public static final int V_5_5_0_ID = 5050099;
|
||||
public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
|
||||
public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
|
||||
public static final int V_5_6_0_ID = 5060099;
|
||||
public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
|
||||
public static final int V_6_0_0_alpha1_ID = 6000001;
|
||||
public static final Version V_6_0_0_alpha1 =
|
||||
new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final int V_6_0_0_alpha2_ID = 6000002;
|
||||
public static final Version V_6_0_0_alpha2 =
|
||||
new Version(V_6_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final Version CURRENT = V_6_0_0_alpha2;
|
||||
public static final int V_6_0_0_alpha3_ID = 6000003;
|
||||
public static final Version V_6_0_0_alpha3 =
|
||||
new Version(V_6_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final Version CURRENT = V_6_0_0_alpha3;
|
||||
|
||||
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
|
||||
|
||||
|
@@ -99,16 +106,22 @@ public class Version implements Comparable<Version> {

    public static Version fromId(int id) {
        switch (id) {
            case V_6_0_0_alpha3_ID:
                return V_6_0_0_alpha3;
            case V_6_0_0_alpha2_ID:
                return V_6_0_0_alpha2;
            case V_6_0_0_alpha1_ID:
                return V_6_0_0_alpha1;
            case V_5_6_0_ID:
                return V_5_6_0;
            case V_5_5_0_ID:
                return V_5_5_0;
            case V_5_4_1_ID:
                return V_5_4_1;
            case V_5_4_0_ID:
                return V_5_4_0;
            case V_5_3_3_ID:
                return V_5_3_3;
            case V_5_3_2_ID:
                return V_5_3_2;
            case V_5_3_1_ID:
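The id constants above encode major, minor and revision plus a two-digit build into a single int (a trailing 99 marks a GA release, 01 to 03 mark the alpha builds). A standalone sketch of that layout, not part of the change, for readers decoding ids such as 5060099 or 6000002:

// Hypothetical helper, not part of this diff: decodes version ids using the layout
// implied by the constants above (major * 1_000_000 + minor * 10_000 + revision * 100 + build).
final class VersionIdSketch {
    static String describe(int id) {
        int major = id / 1000000;
        int minor = (id / 10000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        String qualifier = build == 99 ? "" : "-alpha" + build; // 99 denotes a release build
        return major + "." + minor + "." + revision + qualifier;
    }

    public static void main(String[] args) {
        System.out.println(describe(5060099)); // prints 5.6.0
        System.out.println(describe(6000002)); // prints 6.0.0-alpha2
    }
}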
@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction;
|
|||
import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction;
|
||||
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
|
||||
import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
|
||||
|
@ -234,6 +236,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestListTasksAction;
|
|||
import org.elasticsearch.rest.action.admin.cluster.RestNodesHotThreadsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestNodesInfoAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestNodesStatsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestNodesUsageAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction;
|
||||
|
@ -250,6 +253,9 @@ import org.elasticsearch.rest.action.admin.indices.RestDeleteIndexTemplateAction
|
|||
import org.elasticsearch.rest.action.admin.indices.RestFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetAllSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction;
|
||||
|
@ -269,7 +275,6 @@ import org.elasticsearch.rest.action.admin.indices.RestRefreshAction;
|
|||
import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestTypesExistsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.RestValidateQueryAction;
|
||||
|
@ -310,6 +315,7 @@ import org.elasticsearch.rest.action.search.RestMultiSearchAction;
|
|||
import org.elasticsearch.rest.action.search.RestSearchAction;
|
||||
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.usage.UsageService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
@@ -346,7 +352,7 @@ public class ActionModule extends AbstractModule {
    public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver indexNameExpressionResolver,
                        IndexScopedSettings indexScopedSettings, ClusterSettings clusterSettings, SettingsFilter settingsFilter,
                        ThreadPool threadPool, List<ActionPlugin> actionPlugins, NodeClient nodeClient,
                        CircuitBreakerService circuitBreakerService) {
                        CircuitBreakerService circuitBreakerService, UsageService usageService) {
        this.transportClient = transportClient;
        this.settings = settings;
        this.indexNameExpressionResolver = indexNameExpressionResolver;

@@ -373,7 +379,7 @@ public class ActionModule extends AbstractModule {
        if (transportClient) {
            restController = null;
        } else {
            restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService);
            restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService, usageService);
        }
    }

@@ -405,6 +411,7 @@ public class ActionModule extends AbstractModule {
        actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        actions.register(RemoteInfoAction.INSTANCE, TransportRemoteInfoAction.class);
        actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        actions.register(NodesUsageAction.INSTANCE, TransportNodesUsageAction.class);
        actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class);
        actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class);

@@ -515,6 +522,7 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestNodesInfoAction(settings, restController, settingsFilter));
        registerHandler.accept(new RestRemoteClusterInfoAction(settings, restController));
        registerHandler.accept(new RestNodesStatsAction(settings, restController));
        registerHandler.accept(new RestNodesUsageAction(settings, restController));
        registerHandler.accept(new RestNodesHotThreadsAction(settings, restController));
        registerHandler.accept(new RestClusterAllocationExplainAction(settings, restController));
        registerHandler.accept(new RestClusterStatsAction(settings, restController));

@@ -535,7 +543,9 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestDeleteSnapshotAction(settings, restController));
        registerHandler.accept(new RestSnapshotsStatusAction(settings, restController));

        registerHandler.accept(new RestTypesExistsAction(settings, restController));
        registerHandler.accept(new RestGetAllAliasesAction(settings, restController));
        registerHandler.accept(new RestGetAllMappingsAction(settings, restController));
        registerHandler.accept(new RestGetAllSettingsAction(settings, restController, indexScopedSettings, settingsFilter));
        registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter));
        registerHandler.accept(new RestIndicesStatsAction(settings, restController));
        registerHandler.accept(new RestIndicesSegmentsAction(settings, restController));
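RestController now receives the UsageService so every dispatched REST handler can be counted per node. The real UsageService lives in org.elasticsearch.usage and is not shown in these hunks; as a rough sketch of the idea only, a counting service needs little more than a concurrent map of per-handler counters:

// Illustrative only; the actual UsageService API is defined elsewhere in this change set.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;
import java.util.stream.Collectors;

final class RestUsageCounterSketch {
    private final Map<String, LongAdder> counts = new ConcurrentHashMap<>();

    void addRestCall(String handlerName) {
        counts.computeIfAbsent(handlerName, k -> new LongAdder()).increment();
    }

    Map<String, Long> snapshot() {
        return counts.entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().sum()));
    }
}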
@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
|
|||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
|
||||
import org.elasticsearch.action.support.nodes.TransportNodesAction;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
|
@ -82,11 +81,6 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public static class NodeRequest extends BaseNodeRequest {
|
||||
|
||||
NodesHotThreadsRequest request;
|
||||
|
|
|
@ -76,11 +76,6 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
|
|||
request.transport(), request.http(), request.plugins(), request.ingest(), request.indices());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public static class NodeInfoRequest extends BaseNodeRequest {
|
||||
|
||||
NodesInfoRequest request;
|
||||
|
|
|
@ -76,11 +76,6 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
|
|||
request.ingest());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public static class NodeStatsRequest extends BaseNodeRequest {
|
||||
|
||||
NodesStatsRequest request;
|
||||
|
|
|
@ -167,12 +167,6 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return true;
|
||||
}
|
||||
|
||||
private void setBanOnNodes(String reason, CancellableTask task, DiscoveryNodes nodes, ActionListener<Void> listener) {
|
||||
sendSetBanRequest(nodes,
|
||||
BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()), reason),
|
||||
|
|
|
@ -90,8 +90,4 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
|
|||
super.processTasks(request, operation);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,115 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Map;

public class NodeUsage extends BaseNodeResponse implements ToXContent {

    private long timestamp;
    private long sinceTime;
    private Map<String, Long> restUsage;

    NodeUsage() {
    }

    public static NodeUsage readNodeStats(StreamInput in) throws IOException {
        NodeUsage nodeInfo = new NodeUsage();
        nodeInfo.readFrom(in);
        return nodeInfo;
    }

    /**
     * @param node
     *            the node these statistics were collected from
     * @param timestamp
     *            the timestamp for when these statistics were collected
     * @param sinceTime
     *            the timestamp for when the collection of these statistics
     *            started
     * @param restUsage
     *            a map containing the counts of the number of times each REST
     *            endpoint has been called
     */
    public NodeUsage(DiscoveryNode node, long timestamp, long sinceTime, Map<String, Long> restUsage) {
        super(node);
        this.timestamp = timestamp;
        this.sinceTime = sinceTime;
        this.restUsage = restUsage;
    }

    /**
     * @return the timestamp for when these statistics were collected
     */
    public long getTimestamp() {
        return timestamp;
    }

    /**
     * @return the timestamp for when the collection of these statistics started
     */
    public long getSinceTime() {
        return sinceTime;
    }

    /**
     * @return a map containing the counts of the number of times each REST
     *         endpoint has been called
     */
    public Map<String, Long> getRestUsage() {
        return restUsage;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("since", sinceTime);
        if (restUsage != null) {
            builder.field("rest_actions");
            builder.map(restUsage);
        }
        return builder;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        timestamp = in.readLong();
        sinceTime = in.readLong();
        restUsage = (Map<String, Long>) in.readGenericValue();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeLong(timestamp);
        out.writeLong(sinceTime);
        out.writeGenericValue(restUsage);
    }

}
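A quick sanity sketch of the wire format implied by readFrom/writeTo above: the two longs are written first and the usage map travels as a generic value, so the reader casts it back. Illustrative test-style code, not part of the diff, assuming the stream classes shown in the imports are on the classpath:

// Hedged sketch of the serialization round-trip used by NodeUsage.
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.util.HashMap;
import java.util.Map;

final class RestUsageWireSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        Map<String, Long> restUsage = new HashMap<>();
        restUsage.put("nodes_usage_action", 3L);

        BytesStreamOutput out = new BytesStreamOutput();
        out.writeLong(System.currentTimeMillis()); // timestamp
        out.writeLong(0L);                         // sinceTime
        out.writeGenericValue(restUsage);

        StreamInput in = out.bytes().streamInput();
        long timestamp = in.readLong();
        long sinceTime = in.readLong();
        Map<String, Long> roundTripped = (Map<String, Long>) in.readGenericValue();
        assert roundTripped.equals(restUsage);
    }
}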
@@ -17,20 +17,28 @@
 * under the License.
 */

package org.elasticsearch.indices.analysis;
package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

import java.io.Reader;
public class NodesUsageAction extends Action<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> {

public class DummyCharFilterFactory implements CharFilterFactory {
    @Override
    public String name() {
        return "dummy_char_filter";
    public static final NodesUsageAction INSTANCE = new NodesUsageAction();
    public static final String NAME = "cluster:monitor/nodes/usage";

    protected NodesUsageAction() {
        super(NAME);
    }

    @Override
    public Reader create(Reader reader) {
        return null;
    public NodesUsageRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new NodesUsageRequestBuilder(client, this);
    }
    }

    @Override
    public NodesUsageResponse newResponse() {
        return new NodesUsageResponse();
    }

}
@@ -0,0 +1,86 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class NodesUsageRequest extends BaseNodesRequest<NodesUsageRequest> {

    private boolean restActions;

    public NodesUsageRequest() {
        super();
    }

    /**
     * Get usage from nodes based on the nodes ids specified. If none are
     * passed, usage for all nodes will be returned.
     */
    public NodesUsageRequest(String... nodesIds) {
        super(nodesIds);
    }

    /**
     * Sets all the request flags.
     */
    public NodesUsageRequest all() {
        this.restActions = true;
        return this;
    }

    /**
     * Clears all the request flags.
     */
    public NodesUsageRequest clear() {
        this.restActions = false;
        return this;
    }

    /**
     * Should the node rest actions usage statistics be returned.
     */
    public boolean restActions() {
        return this.restActions;
    }

    /**
     * Should the node rest actions usage statistics be returned.
     */
    public NodesUsageRequest restActions(boolean restActions) {
        this.restActions = restActions;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.restActions = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(restActions);
    }
}
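Usage of the request class above is a one-liner; the node ids and the restActions flag are the only state it carries (sketch, not part of the change):

// Ask two specific nodes for their REST usage counters; clear() would reset the flags instead.
NodesUsageRequest request = new NodesUsageRequest("node-1", "node-2").restActions(true);
assert request.restActions();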
@@ -17,24 +17,18 @@
 * under the License.
 */

package org.elasticsearch.indices.analysis;
package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.index.analysis.AnalyzerProvider;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class DummyAnalyzerProvider implements AnalyzerProvider<DummyAnalyzer> {
    @Override
    public String name() {
        return "dummy";
public class NodesUsageRequestBuilder
        extends NodesOperationRequestBuilder<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> {

    public NodesUsageRequestBuilder(ElasticsearchClient client,
            Action<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> action) {
        super(client, action, new NodesUsageRequest());
    }

    @Override
    public AnalyzerScope scope() {
        return AnalyzerScope.INDICES;
    }

    @Override
    public DummyAnalyzer get() {
        return new DummyAnalyzer();
    }
}
@@ -0,0 +1,85 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.List;

/**
 * The response for the nodes usage api which contains the individual usage
 * statistics for all nodes queried.
 */
public class NodesUsageResponse extends BaseNodesResponse<NodeUsage> implements ToXContent {

    NodesUsageResponse() {
    }

    public NodesUsageResponse(ClusterName clusterName, List<NodeUsage> nodes, List<FailedNodeException> failures) {
        super(clusterName, nodes, failures);
    }

    @Override
    protected List<NodeUsage> readNodesFrom(StreamInput in) throws IOException {
        return in.readList(NodeUsage::readNodeStats);
    }

    @Override
    protected void writeNodesTo(StreamOutput out, List<NodeUsage> nodes) throws IOException {
        out.writeStreamableList(nodes);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("nodes");
        for (NodeUsage nodeUsage : getNodes()) {
            builder.startObject(nodeUsage.getNode().getId());
            builder.field("timestamp", nodeUsage.getTimestamp());
            nodeUsage.toXContent(builder, params);

            builder.endObject();
        }
        builder.endObject();

        return builder;
    }

    @Override
    public String toString() {
        try {
            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
            builder.startObject();
            toXContent(builder, EMPTY_PARAMS);
            builder.endObject();
            return builder.string();
        } catch (IOException e) {
            return "{ \"error\" : \"" + e.getMessage() + "\"}";
        }
    }

}
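Consumers walk the per-node results like any other BaseNodesResponse. A small sketch, assuming a NodesUsageResponse named response is already in hand (see the transport action below for how one is produced):

// Print each node's collection start time and its per-endpoint call counts.
for (NodeUsage usage : response.getNodes()) {
    System.out.println(usage.getNode().getId() + " collecting since " + usage.getSinceTime());
    usage.getRestUsage().forEach((endpoint, count) -> System.out.println("  " + endpoint + " = " + count));
}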
@@ -0,0 +1,99 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.usage;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.usage.UsageService;

import java.io.IOException;
import java.util.List;

public class TransportNodesUsageAction
        extends TransportNodesAction<NodesUsageRequest, NodesUsageResponse, TransportNodesUsageAction.NodeUsageRequest, NodeUsage> {

    private UsageService usageService;

    @Inject
    public TransportNodesUsageAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                     TransportService transportService, ActionFilters actionFilters,
                                     IndexNameExpressionResolver indexNameExpressionResolver, UsageService usageService) {
        super(settings, NodesUsageAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                NodesUsageRequest::new, NodeUsageRequest::new, ThreadPool.Names.MANAGEMENT, NodeUsage.class);
        this.usageService = usageService;
    }

    @Override
    protected NodesUsageResponse newResponse(NodesUsageRequest request, List<NodeUsage> responses, List<FailedNodeException> failures) {
        return new NodesUsageResponse(clusterService.getClusterName(), responses, failures);
    }

    @Override
    protected NodeUsageRequest newNodeRequest(String nodeId, NodesUsageRequest request) {
        return new NodeUsageRequest(nodeId, request);
    }

    @Override
    protected NodeUsage newNodeResponse() {
        return new NodeUsage();
    }

    @Override
    protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) {
        NodesUsageRequest request = nodeUsageRequest.request;
        return usageService.getUsageStats(clusterService.localNode(), request.restActions());
    }

    public static class NodeUsageRequest extends BaseNodeRequest {

        NodesUsageRequest request;

        public NodeUsageRequest() {
        }

        NodeUsageRequest(String nodeId, NodesUsageRequest request) {
            super(nodeId);
            this.request = request;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            request = new NodesUsageRequest();
            request.readFrom(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            request.writeTo(out);
        }
    }
}
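End to end, the transport action above is reachable through NodesUsageAction.INSTANCE. A hedged sketch of calling it from client code; the variable client stands for any ElasticsearchClient and is an assumption here, not part of the diff:

// Build the request directly against the action and block for the per-node usage counters.
NodesUsageRequestBuilder builder = new NodesUsageRequestBuilder(client, NodesUsageAction.INSTANCE);
builder.request().restActions(true);          // only ask for the REST counters
NodesUsageResponse response = builder.get();  // blocking; builder.execute(listener) is the async form
System.out.println(response);                 // toString() renders the per-node JSON built above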
@ -122,11 +122,6 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return true;
|
||||
}
|
||||
|
||||
public static class Request extends BaseNodesRequest<Request> {
|
||||
|
||||
private Snapshot[] snapshots;
|
||||
|
|
|
@ -118,11 +118,6 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
|
|||
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean accumulateExceptions() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public static class ClusterStatsNodeRequest extends BaseNodeRequest {
|
||||
|
||||
ClusterStatsRequest request;
|
||||
|
|
|
@ -81,7 +81,7 @@ public class GetStoredScriptResponse extends ActionResponse implements ToXConten
|
|||
if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
|
||||
source.writeTo(out);
|
||||
} else {
|
||||
out.writeString(source.getCode());
|
||||
out.writeString(source.getSource());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -38,6 +38,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR

    private String id;
    private String lang;
    private String context;
    private BytesReference content;
    private XContentType xContentType;

@@ -45,10 +46,11 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        super();
    }

    public PutStoredScriptRequest(String id, String lang, BytesReference content, XContentType xContentType) {
    public PutStoredScriptRequest(String id, String lang, String context, BytesReference content, XContentType xContentType) {
        super();
        this.id = id;
        this.lang = lang;
        this.context = context;
        this.content = content;
        this.xContentType = Objects.requireNonNull(xContentType);
    }

@@ -94,6 +96,15 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        return this;
    }

    public String context() {
        return context;
    }

    public PutStoredScriptRequest context(String context) {
        this.context = context;
        return this;
    }

    public BytesReference content() {
        return content;
    }

@@ -128,6 +139,9 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        } else {
            xContentType = XContentFactory.xContentType(content);
        }
        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
            context = in.readOptionalString();
        }
    }

    @Override

@@ -140,6 +154,9 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            xContentType.writeTo(out);
        }
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
            out.writeOptionalString(context);
        }
    }

    @Override
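The new context field is only serialized when both ends of the stream are on 6.0.0-alpha2 or later, the usual pattern for adding a field without breaking mixed-version clusters. Restated in isolation for emphasis (a sketch mirroring the hunk above, not additional code in the change):

// The read side must mirror the write side exactly: a pre-alpha2 peer never writes the field,
// so a newer node must not try to read it; writeOptionalString also tolerates a null context.
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
    context = in.readOptionalString();
} else {
    context = null; // older senders have no notion of a script context
}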
@ -59,9 +59,10 @@ import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
|
|||
public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> {
|
||||
private List<AliasActions> allAliasActions = new ArrayList<>();
|
||||
|
||||
//indices options that require every specified index to exist, expand wildcards only to open indices and
|
||||
//don't allow that no indices are resolved from wildcard expressions
|
||||
private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false);
|
||||
// indices options that require every specified index to exist, expand wildcards only to open
|
||||
// indices, don't allow that no indices are resolved from wildcard expressions and resolve the
|
||||
// expressions only against indices
|
||||
private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false, true, false, true);
|
||||
|
||||
public IndicesAliasesRequest() {
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.routing.ShardsIterator;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.FastStringReader;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -179,7 +180,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
|
||||
} else if (request.tokenizer() != null) {
|
||||
final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
|
||||
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, analysisRegistry, environment);
|
||||
Tuple<String, TokenizerFactory> tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers,
|
||||
analysisRegistry, environment);
|
||||
|
||||
TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
|
||||
tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories);
|
||||
|
@ -187,7 +189,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
|
||||
charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories);
|
||||
|
||||
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
|
||||
analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), charFilterFactories, tokenFilterFactories);
|
||||
closeAnalyzer = true;
|
||||
} else if (analyzer == null) {
|
||||
if (indexAnalyzers == null) {
|
||||
|
@ -325,7 +327,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens());
|
||||
}
|
||||
}
|
||||
detailResponse = new DetailAnalyzeResponse(charFilteredLists, new DetailAnalyzeResponse.AnalyzeTokenList(tokenizerFactory.name(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
|
||||
detailResponse = new DetailAnalyzeResponse(charFilteredLists, new DetailAnalyzeResponse.AnalyzeTokenList(
|
||||
customAnalyzer.getTokenizerName(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
|
||||
} else {
|
||||
String name;
|
||||
if (analyzer instanceof NamedAnalyzer) {
|
||||
|
@ -551,8 +554,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
return tokenFilterFactories;
|
||||
}
|
||||
|
||||
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalzyers,
|
||||
private static Tuple<String, TokenizerFactory> parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalzyers,
|
||||
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
|
||||
String name;
|
||||
TokenizerFactory tokenizerFactory;
|
||||
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
|
||||
// parse anonymous settings
|
||||
|
@ -568,6 +572,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizerTypeName + "]");
|
||||
}
|
||||
// Need to set anonymous "name" of tokenizer
|
||||
name = "_anonymous_tokenizer";
|
||||
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
|
||||
} else {
|
||||
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
|
||||
|
@ -576,18 +581,20 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
|||
if (tokenizerFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
|
||||
}
|
||||
name = tokenizer.name;
|
||||
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
|
||||
} else {
|
||||
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalzyers.getIndexSettings());
|
||||
if (tokenizerFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
|
||||
}
|
||||
name = tokenizer.name;
|
||||
tokenizerFactory = tokenizerFactoryFactory.get(indexAnalzyers.getIndexSettings(), environment, tokenizer.name,
|
||||
AnalysisRegistry.getSettingsFromIndexSettings(indexAnalzyers.getIndexSettings(),
|
||||
AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizer.name));
|
||||
}
|
||||
}
|
||||
return tokenizerFactory;
|
||||
return new Tuple<>(name, tokenizerFactory);
|
||||
}
|
||||
|
||||
private static IndexSettings getNaIndexSettings(Settings settings) {
|
||||
|
|
|
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -32,14 +33,16 @@ import java.io.IOException;
public class CreateIndexResponse extends AcknowledgedResponse {

    private boolean shardsAcked;
    private String index;

    protected CreateIndexResponse() {
    }

    protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked) {
    protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String index) {
        super(acknowledged);
        assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too
        this.shardsAcked = shardsAcked;
        this.index = index;
    }

    @Override

@@ -47,6 +50,9 @@ public class CreateIndexResponse extends AcknowledgedResponse {
        super.readFrom(in);
        readAcknowledged(in);
        shardsAcked = in.readBoolean();
        if (in.getVersion().onOrAfter(Version.V_5_6_0)) {
            index = in.readString();
        }
    }

    @Override

@@ -54,6 +60,9 @@ public class CreateIndexResponse extends AcknowledgedResponse {
        super.writeTo(out);
        writeAcknowledged(out);
        out.writeBoolean(shardsAcked);
        if (out.getVersion().onOrAfter(Version.V_5_6_0)) {
            out.writeString(index);
        }
    }

    /**

@@ -65,7 +74,12 @@ public class CreateIndexResponse extends AcknowledgedResponse {
        return shardsAcked;
    }

    public String index() {
        return index;
    }

    public void addCustomFields(XContentBuilder builder) throws IOException {
        builder.field("shards_acknowledged", isShardsAcked());
        builder.field("index", index());
    }
}
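Callers can now recover the concrete index name from the response, which matters when the request used date math or was routed through a rollover/shrink path. A hedged sketch; the client variable is assumed, and index() may be null when the answer came from a pre-5.6.0 node:

// Create an index and report back the name the cluster actually created.
client.admin().indices().prepareCreate("logs-2017.06").execute(ActionListener.wrap(
        response -> {
            String created = response.index() != null ? response.index() : "logs-2017.06";
            System.out.println("created " + created + ", shards acked: " + response.isShardsAcked());
        },
        e -> { throw new RuntimeException(e); }));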
@ -79,7 +79,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
|
|||
.waitForActiveShards(request.waitForActiveShards());
|
||||
|
||||
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
|
||||
listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked())),
|
||||
listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked(), indexName)),
|
||||
listener::onFailure));
|
||||
}
|
||||
|
||||
|
|
|
@ -119,7 +119,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
|
|||
@Override
|
||||
public void onResponse(IndicesStatsResponse statsResponse) {
|
||||
final Set<Condition.Result> conditionResults = evaluateConditions(rolloverRequest.getConditions(),
|
||||
statsResponse.getTotal().getDocs(), metaData.index(sourceIndexName));
|
||||
metaData.index(sourceIndexName), statsResponse);
|
||||
|
||||
if (rolloverRequest.isDryRun()) {
|
||||
listener.onResponse(
|
||||
|
@ -201,6 +201,11 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
|
|||
.collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
static Set<Condition.Result> evaluateConditions(final Set<Condition> conditions, final IndexMetaData metaData,
|
||||
final IndicesStatsResponse statsResponse) {
|
||||
return evaluateConditions(conditions, statsResponse.getPrimaries().getDocs(), metaData);
|
||||
}
|
||||
|
||||
static void validate(MetaData metaData, RolloverRequest request) {
|
||||
final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(request.getAlias());
|
||||
if (aliasOrIndex == null) {
|
||||
|
|
|
@ -25,7 +25,7 @@ public final class ShrinkResponse extends CreateIndexResponse {
|
|||
ShrinkResponse() {
|
||||
}
|
||||
|
||||
ShrinkResponse(boolean acknowledged, boolean shardsAcked) {
|
||||
super(acknowledged, shardsAcked);
|
||||
ShrinkResponse(boolean acknowledged, boolean shardsAcked, String index) {
|
||||
super(acknowledged, shardsAcked, index);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -91,8 +91,13 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
|
|||
IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
|
||||
return shard == null ? null : shard.getPrimary().getDocs();
|
||||
}, indexNameExpressionResolver);
|
||||
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
|
||||
listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked())), listener::onFailure));
|
||||
createIndexService.createIndex(
|
||||
updateRequest,
|
||||
ActionListener.wrap(response ->
|
||||
listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked(), updateRequest.index())),
|
||||
listener::onFailure
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -41,7 +41,6 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;

@@ -300,10 +299,16 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
                if (token == null) {
                    continue;
                }
                assert token == XContentParser.Token.START_OBJECT;
                if (token != XContentParser.Token.START_OBJECT) {
                    throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected "
                            + XContentParser.Token.START_OBJECT + " but found [" + token + "]");
                }
                // Move to FIELD_NAME, that's the action
                token = parser.nextToken();
                assert token == XContentParser.Token.FIELD_NAME;
                if (token != XContentParser.Token.FIELD_NAME) {
                    throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected "
                            + XContentParser.Token.FIELD_NAME + " but found [" + token + "]");
                }
                String action = parser.currentName();

                String index = defaultIndex;
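With the asserts replaced by explicit checks above, a malformed action/metadata line now fails fast with an IllegalArgumentException naming the offending line even when JVM assertions are disabled. An illustrative bad body (the first line is a JSON array, not an object, so parsing the action line rejects it):

// Illustrative only; any non-object action line would be rejected the same way.
String malformedBulk =
        "[\"index\"]\n" +
        "{\"field\":\"value\"}\n";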
@ -76,13 +76,6 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
|
|||
return new TimeValue(tookInMillis);
|
||||
}
|
||||
|
||||
/**
|
||||
* How long the bulk execution took in milliseconds. Excluding ingest preprocessing.
|
||||
*/
|
||||
public long getTookInMillis() {
|
||||
return tookInMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* If ingest is enabled returns the bulk ingest preprocessing time, otherwise 0 is returned.
|
||||
*/
|
||||
|
|
|
@@ -569,9 +569,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
    ActionListener<BulkResponse> wrapActionListenerIfNeeded(long ingestTookInMillis, ActionListener<BulkResponse> actionListener) {
        if (itemResponses.isEmpty()) {
            return ActionListener.wrap(
                    response -> actionListener.onResponse(
                            new BulkResponse(response.getItems(), response.getTookInMillis(), ingestTookInMillis)),
                    actionListener::onFailure);
                    response -> actionListener.onResponse(new BulkResponse(response.getItems(),
                            response.getTook().getMillis(), ingestTookInMillis)),
                    actionListener::onFailure);
        } else {
            return new IngestBulkResponseListener(ingestTookInMillis, originalSlots, itemResponses, actionListener);
        }

@@ -610,7 +610,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            for (int i = 0; i < items.length; i++) {
                itemResponses.add(originalSlots[i], response.getItems()[i]);
            }
            actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), response.getTookInMillis(), ingestTookInMillis));
            actionListener.onResponse(new BulkResponse(
                    itemResponses.toArray(new BulkItemResponse[itemResponses.size()]),
                    response.getTook().getMillis(), ingestTookInMillis));
        }

        @Override
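The listener wrappers above now read the took time through the TimeValue accessor because the getTookInMillis() convenience method is removed from BulkResponse in this change (see the BulkResponse hunk earlier in this diff). Callers migrate like this (sketch):

// Before: long millis = bulkResponse.getTookInMillis();
long millis = bulkResponse.getTook().getMillis();   // TimeValue-based accessor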
@ -477,7 +477,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
case FAILURE:
|
||||
final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure();
|
||||
assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned";
|
||||
operationResult = executeFailureNoOpOnReplica(failure, replica);
|
||||
operationResult = executeFailureNoOpOnReplica(failure, primaryTerm, replica);
|
||||
assert operationResult != null : "operation result must never be null when primary response has no failure";
|
||||
location = syncOperationResultOrThrow(operationResult, location);
|
||||
break;
|
||||
|
@ -673,9 +673,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
return replica.delete(delete);
|
||||
}
|
||||
|
||||
private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, IndexShard replica) throws IOException {
|
||||
final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOp(
|
||||
primaryFailure.getSeqNo(), primaryFailure.getMessage());
|
||||
private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, long primaryTerm,
|
||||
IndexShard replica) throws IOException {
|
||||
final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOpOnReplica(
|
||||
primaryFailure.getSeqNo(), primaryTerm, primaryFailure.getMessage());
|
||||
return replica.markSeqNoAsNoOp(noOp);
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -221,4 +222,34 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> impleme
|
|||
public String toString() {
|
||||
return "delete {[" + index + "][" + type + "][" + id + "]}";
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public long primaryTerm() {
|
||||
throw new UnsupportedOperationException("primary term should never be set on DeleteRequest");
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public void primaryTerm(long term) {
|
||||
throw new UnsupportedOperationException("primary term should never be set on DeleteRequest");
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public DeleteRequest setShardId(ShardId shardId) {
|
||||
throw new UnsupportedOperationException("shard id should never be set on DeleteRequest");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,6 +44,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
|
|||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
@ -608,4 +609,35 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
|
|||
public long getAutoGeneratedTimestamp() {
|
||||
return autoGeneratedTimestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public long primaryTerm() {
|
||||
throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public void primaryTerm(long term) {
|
||||
throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
|
||||
}
|
||||
|
||||
/**
|
||||
* Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
|
||||
* do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
|
||||
* use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
|
||||
*/
|
||||
@Override
|
||||
public IndexRequest setShardId(ShardId shardId) {
|
||||
throw new UnsupportedOperationException("shard id should never be set on IndexRequest");
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
/**
|
||||
* Base implementation for an async action.
|
||||
*/
|
||||
abstract class AbstractAsyncAction {
|
||||
|
||||
private final long startTime;
|
||||
|
||||
protected AbstractAsyncAction() { this(System.currentTimeMillis());}
|
||||
|
||||
protected AbstractAsyncAction(long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the time when the action started.
|
||||
*/
|
||||
protected final long startTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds how long it took to execute the search.
|
||||
*/
|
||||
protected final long buildTookInMillis() {
|
||||
// protect ourselves against time going backwards
|
||||
// negative values don't make sense and we want to be able to serialize that thing as a vLong
|
||||
return Math.max(1, System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
abstract void start();
|
||||
}
|
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportResponse;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

final class ClearScrollController implements Runnable {
    private final DiscoveryNodes nodes;
    private final SearchTransportService searchTransportService;
    private final CountDown expectedOps;
    private final ActionListener<ClearScrollResponse> listener;
    private final AtomicBoolean hasFailed = new AtomicBoolean(false);
    private final AtomicInteger freedSearchContexts = new AtomicInteger(0);
    private final Logger logger;
    private final Runnable runner;

    ClearScrollController(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener, DiscoveryNodes nodes, Logger logger,
                          SearchTransportService searchTransportService) {
        this.nodes = nodes;
        this.logger = logger;
        this.searchTransportService = searchTransportService;
        this.listener = listener;
        List<String> scrollIds = request.getScrollIds();
        final int expectedOps;
        if (scrollIds.size() == 1 && "_all".equals(scrollIds.get(0))) {
            expectedOps = nodes.getSize();
            runner = this::cleanAllScrolls;
        } else {
            List<ScrollIdForNode> parsedScrollIds = new ArrayList<>();
            for (String parsedScrollId : request.getScrollIds()) {
                ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext();
                for (ScrollIdForNode id : context) {
                    parsedScrollIds.add(id);
                }
            }
            if (parsedScrollIds.isEmpty()) {
                expectedOps = 0;
                runner = () -> listener.onResponse(new ClearScrollResponse(true, 0));
            } else {
                expectedOps = parsedScrollIds.size();
                runner = () -> cleanScrollIds(parsedScrollIds);
            }
        }
        this.expectedOps = new CountDown(expectedOps);
    }

    @Override
    public void run() {
        runner.run();
    }

    void cleanAllScrolls() {
        for (final DiscoveryNode node : nodes) {
            try {
                Transport.Connection connection = searchTransportService.getConnection(null, node);
                searchTransportService.sendClearAllScrollContexts(connection, new ActionListener<TransportResponse>() {
                    @Override
                    public void onResponse(TransportResponse response) {
                        onFreedContext(true);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        onFailedFreedContext(e, node);
                    }
                });
            } catch (Exception e) {
                onFailedFreedContext(e, node);
            }
        }
    }

    void cleanScrollIds(List<ScrollIdForNode> parsedScrollIds) {
        SearchScrollAsyncAction.collectNodesAndRun(parsedScrollIds, nodes, searchTransportService, ActionListener.wrap(
            lookup -> {
                for (ScrollIdForNode target : parsedScrollIds) {
                    final DiscoveryNode node = lookup.apply(target.getClusterAlias(), target.getNode());
                    if (node == null) {
                        onFreedContext(false);
                    } else {
                        try {
                            Transport.Connection connection = searchTransportService.getConnection(target.getClusterAlias(), node);
                            searchTransportService.sendFreeContext(connection, target.getScrollId(),
                                ActionListener.wrap(freed -> onFreedContext(freed.isFreed()), e -> onFailedFreedContext(e, node)));
                        } catch (Exception e) {
                            onFailedFreedContext(e, node);
                        }
                    }
                }
            }, listener::onFailure));
    }

    private void onFreedContext(boolean freed) {
        if (freed) {
            freedSearchContexts.incrementAndGet();
        }
        if (expectedOps.countDown()) {
            boolean succeeded = hasFailed.get() == false;
            listener.onResponse(new ClearScrollResponse(succeeded, freedSearchContexts.get()));
        }
    }

    private void onFailedFreedContext(Throwable e, DiscoveryNode node) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
        if (expectedOps.countDown()) {
            listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get()));
        } else {
            hasFailed.set(true);
        }
    }
}
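For illustration only, a hedged usage sketch that is not part of this diff: clearing every open scroll context through the standard Java client API exercises the "_all" branch of the controller above, which fans one request out per node and counts the acknowledgements down with the CountDown shown here. The listener bodies are placeholders.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.client.Client;

class ClearAllScrollsSketch {
    static void clearAll(Client client) {
        ClearScrollRequest request = new ClearScrollRequest();
        request.addScrollId("_all"); // the special id handled by the "_all" branch above
        client.clearScroll(request, ActionListener.wrap(
            response -> System.out.println("freed " + response.getNumFreed()
                + " contexts, succeeded=" + response.isSucceeded()),
            e -> e.printStackTrace()));
    }
}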
@@ -28,6 +28,7 @@ import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.collapse.CollapseBuilder;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
|
@@ -42,11 +43,11 @@ import java.util.function.Function;
*/
|
||||
final class ExpandSearchPhase extends SearchPhase {
|
||||
private final SearchPhaseContext context;
|
||||
private final SearchResponse searchResponse;
|
||||
private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
|
||||
private final InternalSearchResponse searchResponse;
|
||||
private final Function<InternalSearchResponse, SearchPhase> nextPhaseFactory;
|
||||
|
||||
ExpandSearchPhase(SearchPhaseContext context, SearchResponse searchResponse,
|
||||
Function<SearchResponse, SearchPhase> nextPhaseFactory) {
|
||||
ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse,
|
||||
Function<InternalSearchResponse, SearchPhase> nextPhaseFactory) {
|
||||
super("expand");
|
||||
this.context = context;
|
||||
this.searchResponse = searchResponse;
|
||||
|
@@ -65,7 +66,7 @@ final class ExpandSearchPhase extends SearchPhase {
|
||||
@Override
|
||||
public void run() throws IOException {
|
||||
if (isCollapseRequest() && searchResponse.getHits().getHits().length > 0) {
|
||||
if (isCollapseRequest() && searchResponse.hits().getHits().length > 0) {
|
||||
SearchRequest searchRequest = context.getRequest();
|
||||
CollapseBuilder collapseBuilder = searchRequest.source().collapse();
|
||||
final List<InnerHitBuilder> innerHitBuilders = collapseBuilder.getInnerHits();
|
||||
|
@@ -73,7 +74,7 @@ final class ExpandSearchPhase extends SearchPhase {
if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) {
|
||||
multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests());
|
||||
}
|
||||
for (SearchHit hit : searchResponse.getHits()) {
|
||||
for (SearchHit hit : searchResponse.hits().getHits()) {
|
||||
BoolQueryBuilder groupQuery = new BoolQueryBuilder();
|
||||
Object collapseValue = hit.field(collapseBuilder.getField()).getValue();
|
||||
if (collapseValue != null) {
|
||||
|
@@ -97,7 +98,7 @@ final class ExpandSearchPhase extends SearchPhase {
context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(),
|
||||
ActionListener.wrap(response -> {
|
||||
Iterator<MultiSearchResponse.Item> it = response.iterator();
|
||||
for (SearchHit hit : searchResponse.getHits()) {
|
||||
for (SearchHit hit : searchResponse.hits.getHits()) {
|
||||
for (InnerHitBuilder innerHitBuilder : innerHitBuilders) {
|
||||
MultiSearchResponse.Item item = it.next();
|
||||
if (item.isFailure()) {
|
||||
|
|
|
@@ -36,7 +36,7 @@ import org.elasticsearch.transport.Transport;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
/**
|
||||
* This search phase merges the query results from the previous phase together and calculates the topN hits for this search.
|
||||
|
@@ -46,7 +46,7 @@ final class FetchSearchPhase extends SearchPhase {
private final AtomicArray<FetchSearchResult> fetchResults;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
private final AtomicArray<SearchPhaseResult> queryResults;
|
||||
private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
|
||||
private final BiFunction<InternalSearchResponse, String, SearchPhase> nextPhaseFactory;
|
||||
private final SearchPhaseContext context;
|
||||
private final Logger logger;
|
||||
private final InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> resultConsumer;
|
||||
|
@@ -55,13 +55,13 @@ final class FetchSearchPhase extends SearchPhase {
SearchPhaseController searchPhaseController,
|
||||
SearchPhaseContext context) {
|
||||
this(resultConsumer, searchPhaseController, context,
|
||||
(response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits
|
||||
(finalResponse) -> sendResponsePhase(finalResponse, context)));
|
||||
(response, scrollId) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits
|
||||
(finalResponse) -> sendResponsePhase(finalResponse, scrollId, context)));
|
||||
}
|
||||
|
||||
FetchSearchPhase(InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> resultConsumer,
|
||||
SearchPhaseController searchPhaseController,
|
||||
SearchPhaseContext context, Function<SearchResponse, SearchPhase> nextPhaseFactory) {
|
||||
SearchPhaseContext context, BiFunction<InternalSearchResponse, String, SearchPhase> nextPhaseFactory) {
|
||||
super("fetch");
|
||||
if (context.getNumShards() != resultConsumer.getNumShards()) {
|
||||
throw new IllegalStateException("number of shards must match the length of the query results but doesn't:"
|
||||
|
@@ -205,14 +205,14 @@ final class FetchSearchPhase extends SearchPhase {
AtomicArray<? extends SearchPhaseResult> fetchResultsArr) {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null,
|
||||
reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get);
|
||||
context.executeNextPhase(this, nextPhaseFactory.apply(context.buildSearchResponse(internalResponse, scrollId)));
|
||||
context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, scrollId));
|
||||
}
|
||||
|
||||
private static SearchPhase sendResponsePhase(SearchResponse response, SearchPhaseContext context) {
|
||||
private static SearchPhase sendResponsePhase(InternalSearchResponse response, String scrollId, SearchPhaseContext context) {
|
||||
return new SearchPhase("response") {
|
||||
@Override
|
||||
public void run() throws IOException {
|
||||
context.onResponse(response);
|
||||
context.onResponse(context.buildSearchResponse(response, scrollId));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@@ -19,12 +19,16 @@
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.common.inject.internal.Nullable;
|
||||
|
||||
class ScrollIdForNode {
|
||||
private final String node;
|
||||
private final long scrollId;
|
||||
private final String clusterAlias;
|
||||
|
||||
ScrollIdForNode(String node, long scrollId) {
|
||||
ScrollIdForNode(@Nullable String clusterAlias, String node, long scrollId) {
|
||||
this.node = node;
|
||||
this.clusterAlias = clusterAlias;
|
||||
this.scrollId = scrollId;
|
||||
}
|
||||
|
||||
|
@@ -32,7 +36,20 @@ class ScrollIdForNode {
return node;
|
||||
}
|
||||
|
||||
public String getClusterAlias() {
|
||||
return clusterAlias;
|
||||
}
|
||||
|
||||
public long getScrollId() {
|
||||
return scrollId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ScrollIdForNode{" +
|
||||
"node='" + node + '\'' +
|
||||
", scrollId=" + scrollId +
|
||||
", clusterAlias='" + clusterAlias + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -405,9 +405,18 @@ public final class SearchPhaseController extends AbstractComponent {
* @param queryResults a list of non-null query shard results
|
||||
*/
|
||||
public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest) {
|
||||
return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(), 0, isScrollRequest);
|
||||
return reducedQueryPhase(queryResults, isScrollRequest, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reduces the given query results and consumes all aggregations and profile results.
|
||||
* @param queryResults a list of non-null query shard results
|
||||
*/
|
||||
public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest, boolean trackTotalHits) {
|
||||
return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Reduces the given query results and consumes all aggregations and profile results.
|
||||
* @param queryResults a list of non-null query shard results
|
||||
|
@@ -711,6 +720,7 @@ public final class SearchPhaseController extends AbstractComponent {
boolean isScrollRequest = request.scroll() != null;
|
||||
final boolean hasAggs = source != null && source.aggregations() != null;
|
||||
final boolean hasTopDocs = source == null || source.size() != 0;
|
||||
final boolean trackTotalHits = source == null || source.trackTotalHits();
|
||||
|
||||
if (isScrollRequest == false && (hasAggs || hasTopDocs)) {
|
||||
// no incremental reduce if scroll is used - we only hit a single shard or sometimes more...
|
||||
|
@@ -722,18 +732,30 @@ public final class SearchPhaseController extends AbstractComponent {
return new InitialSearchPhase.SearchPhaseResults(numShards) {
|
||||
@Override
|
||||
public ReducedQueryPhase reduce() {
|
||||
return reducedQueryPhase(results.asList(), isScrollRequest);
|
||||
return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHits);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
static final class TopDocsStats {
|
||||
final boolean trackTotalHits;
|
||||
long totalHits;
|
||||
long fetchHits;
|
||||
float maxScore = Float.NEGATIVE_INFINITY;
|
||||
|
||||
TopDocsStats() {
|
||||
this(true);
|
||||
}
|
||||
|
||||
TopDocsStats(boolean trackTotalHits) {
|
||||
this.trackTotalHits = trackTotalHits;
|
||||
this.totalHits = trackTotalHits ? 0 : -1;
|
||||
}
|
||||
|
||||
void add(TopDocs topDocs) {
|
||||
totalHits += topDocs.totalHits;
|
||||
if (trackTotalHits) {
|
||||
totalHits += topDocs.totalHits;
|
||||
}
|
||||
fetchHits += topDocs.scoreDocs.length;
|
||||
if (!Float.isNaN(topDocs.getMaxScore())) {
|
||||
maxScore = Math.max(maxScore, topDocs.getMaxScore());
|
||||
|
|
|
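The hunk above changes TopDocsStats so that a reduced response can report an unknown total. A standalone sketch of that counting pattern, assuming nothing beyond plain Java (it is not the Elasticsearch class itself): when tracking is disabled the accumulator is pinned at -1 instead of summing per-shard totals, while the number of hits actually returned is still counted.

final class TopDocsStatsSketch {
    final boolean trackTotalHits;
    long totalHits;
    long fetchHits;

    TopDocsStatsSketch(boolean trackTotalHits) {
        this.trackTotalHits = trackTotalHits;
        this.totalHits = trackTotalHits ? 0 : -1; // -1 means "not tracked"
    }

    void add(long shardTotalHits, int shardReturnedHits) {
        if (trackTotalHits) {
            totalHits += shardTotalHits;
        }
        fetchHits += shardReturnedHits;
    }

    public static void main(String[] args) {
        TopDocsStatsSketch tracked = new TopDocsStatsSketch(true);
        TopDocsStatsSketch untracked = new TopDocsStatsSketch(false);
        tracked.add(100, 10);
        untracked.add(100, 10);
        System.out.println(tracked.totalHits);   // 100
        System.out.println(untracked.totalHits); // -1
    }
}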
@@ -39,6 +39,8 @@ import java.util.Arrays;
import java.util.Collections;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A request to execute search against one or more indices (or all). Best created using
|
||||
* {@link org.elasticsearch.client.Requests#searchRequest(String...)}.
|
||||
|
@@ -102,7 +104,12 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
ActionRequestValidationException validationException = null;
|
||||
if (source != null && source.trackTotalHits() == false && scroll() != null) {
|
||||
validationException =
|
||||
addValidationError("disabling [track_total_hits] is not allowed in a scroll context", validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
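A hedged, compile-level sketch (not part of this diff) of what the new validate() rejects: a request that both scrolls and disables total-hit tracking. The index name is illustrative and the snippet assumes the usual SearchRequest and SearchSourceBuilder setters.

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;

class TrackTotalHitsValidationSketch {
    static ActionRequestValidationException check() {
        SearchRequest request = new SearchRequest("my-index"); // illustrative index name
        request.source(new SearchSourceBuilder().trackTotalHits(false));
        request.scroll(TimeValue.timeValueMinutes(1));
        // expected to be non-null: "disabling [track_total_hits] is not allowed in a scroll context"
        return request.validate();
    }
}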
@@ -363,14 +363,21 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
|
||||
|
||||
/**
|
||||
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
|
||||
* <tt>false</tt>.
|
||||
* Applies when sorting, and controls if scores will be tracked as well. Defaults to <tt>false</tt>.
|
||||
*/
|
||||
public SearchRequestBuilder setTrackScores(boolean trackScores) {
|
||||
sourceBuilder().trackScores(trackScores);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates if the total hit count for the query should be tracked. Defaults to <tt>true</tt>
|
||||
*/
|
||||
public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) {
|
||||
sourceBuilder().trackTotalHits(trackTotalHits);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds stored fields to load and return (note, it must be stored) as part of the search request.
|
||||
* To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}.
|
||||
|
|
|
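A hedged usage sketch of the new builder option above (not from the commit); the index name and the size are illustrative.

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.Client;

class TrackTotalHitsBuilderSketch {
    static SearchRequestBuilder sketch(Client client) {
        return client.prepareSearch("my-index")
            .setSize(10)
            .setTrackTotalHits(false); // skip counting the total hit count for this search
    }
}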
@@ -45,8 +45,6 @@ import java.util.Map;
|
||||
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
|
||||
|
||||
|
||||
/**
|
||||
|
@@ -135,13 +133,6 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
return new TimeValue(tookInMillis);
|
||||
}
|
||||
|
||||
/**
|
||||
* How long the search took in milliseconds.
|
||||
*/
|
||||
public long getTookInMillis() {
|
||||
return tookInMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* The total number of shards the search was executed on.
|
||||
*/
|
||||
|
@@ -252,7 +243,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else if (NUM_REDUCE_PHASES.match(currentFieldName)) {
|
||||
numReducePhases = parser.intValue();
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (SearchHits.Fields.HITS.equals(currentFieldName)) {
|
||||
|
@@ -275,7 +266,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else if (RestActions.TOTAL_FIELD.match(currentFieldName)) {
|
||||
totalShards = parser.intValue();
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (RestActions.FAILURES_FIELD.match(currentFieldName)) {
|
||||
|
@@ -283,14 +274,14 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
failures.add(ShardSearchFailure.fromXContent(parser));
|
||||
}
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else {
|
||||
throwUnknownToken(token, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,286 @@
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.transport.RemoteClusterService;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
|
||||
/**
|
||||
* Abstract base class for scroll execution modes. This class encapsulates the basic logic to
|
||||
* fan out to nodes and execute the query part of the scroll request. Subclasses can for instance
|
||||
* run separate fetch phases etc.
|
||||
*/
|
||||
abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements Runnable {
|
||||
    /*
     * Some random TODO:
     * Today we still have a dedicated executing mode for scrolls while we could simplify this by implementing
     * scroll like functionality (mainly syntactic sugar) as an ordinary search with search_after. We could even go further and
     * make the scroll entirely stateless and encode the state per shard in the scroll ID.
     *
     * Today we also hold a context per shard but maybe
     * we want the context per coordinating node such that we route the scroll to the same coordinator all the time and hold the context
     * here? This would have the advantage that if we lose that node the entire scroll is dead, not just one shard.
     *
     * Additionally there is the possibility to associate the scroll with a seq. id. such that we can talk to any replica as long as
     * the shard's engine hasn't advanced that seq. id yet. Such a resume is possible and best effort, it could even be a safety net since
     * if you rely on indices being read-only things can change in-between without notification or it's hard to detect if there were any
     * changes while scrolling. These are all options to improve the current situation which we can look into down the road
     */
|
||||
protected final Logger logger;
|
||||
protected final ActionListener<SearchResponse> listener;
|
||||
protected final ParsedScrollId scrollId;
|
||||
protected final DiscoveryNodes nodes;
|
||||
protected final SearchPhaseController searchPhaseController;
|
||||
protected final SearchScrollRequest request;
|
||||
protected final SearchTransportService searchTransportService;
|
||||
private final long startTime;
|
||||
private final List<ShardSearchFailure> shardFailures = new ArrayList<>();
|
||||
private final AtomicInteger successfulOps;
|
||||
|
||||
protected SearchScrollAsyncAction(ParsedScrollId scrollId, Logger logger, DiscoveryNodes nodes,
|
||||
ActionListener<SearchResponse> listener, SearchPhaseController searchPhaseController,
|
||||
SearchScrollRequest request,
|
||||
SearchTransportService searchTransportService) {
|
||||
this.startTime = System.currentTimeMillis();
|
||||
this.scrollId = scrollId;
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
this.logger = logger;
|
||||
this.listener = listener;
|
||||
this.nodes = nodes;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.request = request;
|
||||
this.searchTransportService = searchTransportService;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds how long it took to execute the search.
|
||||
*/
|
||||
private long buildTookInMillis() {
|
||||
// protect ourselves against time going backwards
|
||||
// negative values don't make sense and we want to be able to serialize that thing as a vLong
|
||||
return Math.max(1, System.currentTimeMillis() - startTime);
|
||||
}
|
||||
|
||||
public final void run() {
|
||||
final ScrollIdForNode[] context = scrollId.getContext();
|
||||
if (context.length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
} else {
|
||||
collectNodesAndRun(Arrays.asList(context), nodes, searchTransportService, ActionListener.wrap(lookup -> run(lookup, context),
|
||||
listener::onFailure));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This method collects nodes from the remote clusters asynchronously if any of the scroll IDs references a remote cluster.
|
||||
* Otherwise the action listener will be invoked immediately with a function based on the given discovery nodes.
|
||||
*/
|
||||
static void collectNodesAndRun(final Iterable<ScrollIdForNode> scrollIds, DiscoveryNodes nodes,
|
||||
SearchTransportService searchTransportService,
|
||||
ActionListener<BiFunction<String, String, DiscoveryNode>> listener) {
|
||||
Set<String> clusters = new HashSet<>();
|
||||
for (ScrollIdForNode target : scrollIds) {
|
||||
if (target.getClusterAlias() != null) {
|
||||
clusters.add(target.getClusterAlias());
|
||||
}
|
||||
}
|
||||
if (clusters.isEmpty()) { // no remote clusters
|
||||
listener.onResponse((cluster, node) -> nodes.get(node));
|
||||
} else {
|
||||
RemoteClusterService remoteClusterService = searchTransportService.getRemoteClusterService();
|
||||
remoteClusterService.collectNodes(clusters, ActionListener.wrap(nodeFunction -> {
|
||||
final BiFunction<String, String, DiscoveryNode> clusterNodeLookup = (clusterAlias, node) -> {
|
||||
if (clusterAlias == null) {
|
||||
return nodes.get(node);
|
||||
} else {
|
||||
return nodeFunction.apply(clusterAlias, node);
|
||||
}
|
||||
};
|
||||
listener.onResponse(clusterNodeLookup);
|
||||
}, listener::onFailure));
|
||||
}
|
||||
}
|
||||
|
||||
private void run(BiFunction<String, String, DiscoveryNode> clusterNodeLookup, final ScrollIdForNode[] context) {
|
||||
final CountDown counter = new CountDown(scrollId.getContext().length);
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
final int shardIndex = i;
|
||||
final Transport.Connection connection;
|
||||
try {
|
||||
DiscoveryNode node = clusterNodeLookup.apply(target.getClusterAlias(), target.getNode());
|
||||
if (node == null) {
|
||||
throw new IllegalStateException("node [" + target.getNode() + "] is not available");
|
||||
}
|
||||
connection = getConnection(target.getClusterAlias(), node);
|
||||
} catch (Exception ex) {
|
||||
onShardFailure("query", counter, target.getScrollId(),
|
||||
ex, null, () -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup));
|
||||
continue;
|
||||
}
|
||||
final InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(target.getScrollId(), request);
|
||||
// we can't create a SearchShardTarget here since we don't know the index and shard ID we are talking to
|
||||
// we only know the node and the search context ID. Yet, the response will contain the SearchShardTarget
|
||||
// from the target node instead...that's why we pass null here
|
||||
SearchActionListener<T> searchActionListener = new SearchActionListener<T>(null, shardIndex) {
|
||||
|
||||
@Override
|
||||
protected void setSearchShardTarget(T response) {
|
||||
// don't do this - it's part of the response...
|
||||
assert response.getSearchShardTarget() != null : "search shard target must not be null";
|
||||
if (target.getClusterAlias() != null) {
|
||||
                    // re-create the search target and add the cluster alias if there is any,
                    // we need this down the road for subsequent phases
|
||||
SearchShardTarget searchShardTarget = response.getSearchShardTarget();
|
||||
response.setSearchShardTarget(new SearchShardTarget(searchShardTarget.getNodeId(), searchShardTarget.getShardId(),
|
||||
target.getClusterAlias(), null));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerOnResponse(T result) {
|
||||
assert shardIndex == result.getShardIndex() : "shard index mismatch: " + shardIndex + " but got: "
|
||||
+ result.getShardIndex();
|
||||
onFirstPhaseResult(shardIndex, result);
|
||||
if (counter.countDown()) {
|
||||
SearchPhase phase = moveToNextPhase(clusterNodeLookup);
|
||||
try {
|
||||
phase.run();
|
||||
} catch (Exception e) {
|
||||
// we need to fail the entire request here - the entire phase just blew up
|
||||
// don't call onShardFailure or onFailure here since otherwise we'd countDown the counter
|
||||
// again which would result in an exception
|
||||
listener.onFailure(new SearchPhaseExecutionException(phase.getName(), "Phase failed", e,
|
||||
ShardSearchFailure.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
onShardFailure("query", counter, target.getScrollId(), t, null,
|
||||
() -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup));
|
||||
}
|
||||
};
|
||||
executeInitialPhase(connection, internalRequest, searchActionListener);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized ShardSearchFailure[] buildShardFailures() { // pkg private for testing
|
||||
if (shardFailures.isEmpty()) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
return shardFailures.toArray(new ShardSearchFailure[shardFailures.size()]);
|
||||
}
|
||||
|
||||
    // we do our best to return the shard failures, but it's ok if it's not fully concurrently safe
    // we simply try and return as much as possible
|
||||
private synchronized void addShardFailure(ShardSearchFailure failure) {
|
||||
shardFailures.add(failure);
|
||||
}
|
||||
|
||||
protected abstract void executeInitialPhase(Transport.Connection connection, InternalScrollSearchRequest internalRequest,
|
||||
SearchActionListener<T> searchActionListener);
|
||||
|
||||
protected abstract SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup);
|
||||
|
||||
protected abstract void onFirstPhaseResult(int shardId, T result);
|
||||
|
||||
protected SearchPhase sendResponsePhase(SearchPhaseController.ReducedQueryPhase queryPhase,
|
||||
final AtomicArray<? extends SearchPhaseResult> fetchResults) {
|
||||
return new SearchPhase("fetch") {
|
||||
@Override
|
||||
public void run() throws IOException {
|
||||
sendResponse(queryPhase, fetchResults);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
protected final void sendResponse(SearchPhaseController.ReducedQueryPhase queryPhase,
|
||||
final AtomicArray<? extends SearchPhaseResult> fetchResults) {
|
||||
try {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase, fetchResults.asList(),
|
||||
fetchResults::get);
|
||||
            // the scroll ID never changes, we always return the same ID. This ID contains all the shards and their context ids
            // such that we can talk to them again in the next roundtrip.
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
protected void onShardFailure(String phaseName, final CountDown counter, final long searchId, Exception failure,
|
||||
@Nullable SearchShardTarget searchShardTarget,
|
||||
Supplier<SearchPhase> nextPhaseSupplier) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute {} phase", searchId, phaseName), failure);
|
||||
}
|
||||
addShardFailure(new ShardSearchFailure(failure, searchShardTarget));
|
||||
int successfulOperations = successfulOps.decrementAndGet();
|
||||
assert successfulOperations >= 0 : "successfulOperations must be >= 0 but was: " + successfulOperations;
|
||||
if (counter.countDown()) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException(phaseName, "all shards failed", failure, buildShardFailures()));
|
||||
} else {
|
||||
SearchPhase phase = nextPhaseSupplier.get();
|
||||
try {
|
||||
phase.run();
|
||||
} catch (Exception e) {
|
||||
e.addSuppressed(failure);
|
||||
listener.onFailure(new SearchPhaseExecutionException(phase.getName(), "Phase failed", e,
|
||||
ShardSearchFailure.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
|
||||
return searchTransportService.getConnection(clusterAlias, node);
|
||||
}
|
||||
}
|
|
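collectNodesAndRun above hands its caller a BiFunction that resolves a (cluster alias, node id) pair either against the local nodes or against the nodes collected from remote clusters. A standalone sketch of that lookup contract, assuming only plain Java maps in place of DiscoveryNodes (it is not the Elasticsearch implementation):

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;

final class ClusterNodeLookupSketch {
    public static void main(String[] args) {
        Map<String, String> localNodes = new HashMap<>();
        localNodes.put("nodeA", "10.0.0.1");
        Map<String, Map<String, String>> remoteNodes = new HashMap<>();
        Map<String, String> clusterTwo = new HashMap<>();
        clusterTwo.put("nodeB", "10.0.1.1");
        remoteNodes.put("cluster_two", clusterTwo);

        // null alias -> local lookup, otherwise look in the remote cluster's node table
        BiFunction<String, String, String> lookup = (clusterAlias, nodeId) ->
            clusterAlias == null ? localNodes.get(nodeId)
                : remoteNodes.getOrDefault(clusterAlias, new HashMap<>()).get(nodeId);

        System.out.println(lookup.apply(null, "nodeA"));          // 10.0.0.1
        System.out.println(lookup.apply("cluster_two", "nodeB")); // 10.0.1.1
        System.out.println(lookup.apply("cluster_two", "nodeC")); // null -> "node is not available"
    }
}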
@@ -20,166 +20,43 @@
package org.elasticsearch.action.search;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
final class SearchScrollQueryAndFetchAsyncAction extends SearchScrollAsyncAction<ScrollQueryFetchSearchResult> {
|
||||
|
||||
final class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final Logger logger;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
private final SearchTransportService searchTransportService;
|
||||
private final SearchScrollRequest request;
|
||||
private final SearchTask task;
|
||||
private final ActionListener<SearchResponse> listener;
|
||||
private final ParsedScrollId scrollId;
|
||||
private final DiscoveryNodes nodes;
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
|
||||
private final AtomicInteger successfulOps;
|
||||
private final AtomicInteger counter;
|
||||
|
||||
SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
SearchPhaseController searchPhaseController, SearchScrollRequest request, SearchTask task,
|
||||
ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
this.logger = logger;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.request = request;
|
||||
super(scrollId, logger, clusterService.state().nodes(), listener, searchPhaseController, request, searchTransportService);
|
||||
this.task = task;
|
||||
this.listener = listener;
|
||||
this.scrollId = scrollId;
|
||||
this.nodes = clusterService.state().nodes();
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
this.counter = new AtomicInteger(scrollId.getContext().length);
|
||||
|
||||
this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
private ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<ShardSearchFailure> failures = shardFailures.asList();
|
||||
return failures.toArray(new ShardSearchFailure[failures.size()]);
|
||||
@Override
|
||||
protected void executeInitialPhase(Transport.Connection connection, InternalScrollSearchRequest internalRequest,
|
||||
SearchActionListener<ScrollQueryFetchSearchResult> searchActionListener) {
|
||||
searchTransportService.sendExecuteScrollFetch(connection, internalRequest, task, searchActionListener);
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
private void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
shardFailures.set(shardIndex, failure);
|
||||
@Override
|
||||
protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) {
|
||||
return sendResponsePhase(searchPhaseController.reducedQueryPhase(queryFetchResults.asList(), true), queryFetchResults);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (scrollId.getContext().length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
|
||||
ScrollIdForNode[] context = scrollId.getContext();
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node != null) {
|
||||
executePhase(i, node, target.getScrollId());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (ScrollIdForNode target : scrollId.getContext()) {
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node == null) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchTransportService.sendExecuteScrollFetch(node, internalRequest, task,
|
||||
new SearchActionListener<ScrollQueryFetchSearchResult>(null, shardIndex) {
|
||||
@Override
|
||||
protected void setSearchShardTarget(ScrollQueryFetchSearchResult response) {
|
||||
// don't do this - it's part of the response...
|
||||
assert response.getSearchShardTarget() != null : "search shard target must not be null";
|
||||
}
|
||||
@Override
|
||||
protected void innerOnResponse(ScrollQueryFetchSearchResult response) {
|
||||
queryFetchResults.set(response.getShardIndex(), response.result());
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
onPhaseFailure(t, searchId, shardIndex);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void onPhaseFailure(Exception e, long searchId, int shardIndex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), e);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(e));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", e, buildShardFailures()));
|
||||
} else {
|
||||
finishHim();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void finishHim() {
|
||||
try {
|
||||
innerFinishHim();
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
|
||||
private void innerFinishHim() throws Exception {
|
||||
List<QueryFetchSearchResult> queryFetchSearchResults = queryFetchResults.asList();
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true,
|
||||
searchPhaseController.reducedQueryPhase(queryFetchSearchResults, true), queryFetchSearchResults, queryFetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
@Override
|
||||
protected void onFirstPhaseResult(int shardId, ScrollQueryFetchSearchResult result) {
|
||||
queryFetchResults.setOnce(shardId, result.result());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -21,215 +21,105 @@ package org.elasticsearch.action.search;
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchRequest;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.search.query.ScrollQuerySearchResult;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.io.IOException;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;
|
||||
final class SearchScrollQueryThenFetchAsyncAction extends SearchScrollAsyncAction<ScrollQuerySearchResult> {
|
||||
|
||||
final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
||||
|
||||
private final Logger logger;
|
||||
private final SearchTask task;
|
||||
private final SearchTransportService searchTransportService;
|
||||
private final SearchPhaseController searchPhaseController;
|
||||
private final SearchScrollRequest request;
|
||||
private final ActionListener<SearchResponse> listener;
|
||||
private final ParsedScrollId scrollId;
|
||||
private final DiscoveryNodes nodes;
|
||||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
private final AtomicInteger successfulOps;
|
||||
private final AtomicArray<FetchSearchResult> fetchResults;
|
||||
private final AtomicArray<QuerySearchResult> queryResults;
|
||||
|
||||
SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
SearchPhaseController searchPhaseController, SearchScrollRequest request, SearchTask task,
|
||||
ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
|
||||
this.logger = logger;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.request = request;
|
||||
super(scrollId, logger, clusterService.state().nodes(), listener, searchPhaseController, request,
|
||||
searchTransportService);
|
||||
this.task = task;
|
||||
this.listener = listener;
|
||||
this.scrollId = scrollId;
|
||||
this.nodes = clusterService.state().nodes();
|
||||
this.successfulOps = new AtomicInteger(scrollId.getContext().length);
|
||||
this.queryResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
this.queryResults = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
|
||||
private ShardSearchFailure[] buildShardFailures() {
|
||||
if (shardFailures == null) {
|
||||
return ShardSearchFailure.EMPTY_ARRAY;
|
||||
}
|
||||
List<ShardSearchFailure> failures = shardFailures.asList();
|
||||
return failures.toArray(new ShardSearchFailure[failures.size()]);
|
||||
protected void onFirstPhaseResult(int shardId, ScrollQuerySearchResult result) {
|
||||
queryResults.setOnce(shardId, result.queryResult());
|
||||
}
|
||||
|
||||
// we do our best to return the shard failures, but its ok if its not fully concurrently safe
|
||||
// we simply try and return as much as possible
|
||||
private void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
|
||||
if (shardFailures == null) {
|
||||
shardFailures = new AtomicArray<>(scrollId.getContext().length);
|
||||
}
|
||||
shardFailures.set(shardIndex, failure);
|
||||
@Override
|
||||
protected void executeInitialPhase(Transport.Connection connection, InternalScrollSearchRequest internalRequest,
|
||||
SearchActionListener<ScrollQuerySearchResult> searchActionListener) {
|
||||
searchTransportService.sendExecuteScrollQuery(connection, internalRequest, task, searchActionListener);
|
||||
}
|
||||
|
||||
public void start() {
|
||||
if (scrollId.getContext().length == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
final CountDown counter = new CountDown(scrollId.getContext().length);
|
||||
ScrollIdForNode[] context = scrollId.getContext();
|
||||
for (int i = 0; i < context.length; i++) {
|
||||
ScrollIdForNode target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.getNode());
|
||||
if (node != null) {
|
||||
executeQueryPhase(i, counter, node, target.getScrollId());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource());
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.countDown()) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeQueryPhase(final int shardIndex, final CountDown counter, DiscoveryNode node, final long searchId) {
|
||||
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
|
||||
searchTransportService.sendExecuteScrollQuery(node, internalRequest, task,
|
||||
new SearchActionListener<ScrollQuerySearchResult>(null, shardIndex) {
|
||||
|
||||
@Override
|
||||
protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) {
|
||||
return new SearchPhase("fetch") {
|
||||
@Override
|
||||
protected void setSearchShardTarget(ScrollQuerySearchResult response) {
|
||||
// don't do this - it's part of the response...
|
||||
assert response.getSearchShardTarget() != null : "search shard target must not be null";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerOnResponse(ScrollQuerySearchResult result) {
|
||||
queryResults.setOnce(result.getShardIndex(), result.queryResult());
|
||||
if (counter.countDown()) {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
public void run() throws IOException {
|
||||
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(
|
||||
queryResults.asList(), true);
|
||||
if (reducedQueryPhase.scoreDocs.length == 0) {
|
||||
sendResponse(reducedQueryPhase, fetchResults);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
onQueryPhaseFailure(shardIndex, counter, searchId, t);
|
||||
}
|
||||
});
|
||||
}
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(),
|
||||
reducedQueryPhase.scoreDocs);
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase,
|
||||
queryResults.length());
|
||||
final CountDown counter = new CountDown(docIdsToLoad.length);
|
||||
for (int i = 0; i < docIdsToLoad.length; i++) {
|
||||
final int index = i;
|
||||
final IntArrayList docIds = docIdsToLoad[index];
|
||||
if (docIds != null) {
|
||||
final QuerySearchResult querySearchResult = queryResults.get(index);
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[index];
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.getRequestId(), docIds,
|
||||
lastEmittedDoc);
|
||||
SearchShardTarget searchShardTarget = querySearchResult.getSearchShardTarget();
|
||||
DiscoveryNode node = clusterNodeLookup.apply(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId());
|
||||
assert node != null : "target node is null in secondary phase";
|
||||
Transport.Connection connection = getConnection(searchShardTarget.getClusterAlias(), node);
|
||||
searchTransportService.sendExecuteFetchScroll(connection, shardFetchRequest, task,
|
||||
new SearchActionListener<FetchSearchResult>(querySearchResult.getSearchShardTarget(), index) {
|
||||
@Override
|
||||
protected void innerOnResponse(FetchSearchResult response) {
|
||||
fetchResults.setOnce(response.getShardIndex(), response);
|
||||
if (counter.countDown()) {
|
||||
sendResponse(reducedQueryPhase, fetchResults);
|
||||
}
|
||||
}
|
||||
|
||||
void onQueryPhaseFailure(final int shardIndex, final CountDown counter, final long searchId, Exception failure) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure);
|
||||
}
|
||||
addShardFailure(shardIndex, new ShardSearchFailure(failure));
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.countDown()) {
|
||||
if (successfulOps.get() == 0) {
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures()));
|
||||
} else {
|
||||
try {
|
||||
executeFetchPhase();
|
||||
} catch (Exception e) {
|
||||
e.addSuppressed(failure);
|
||||
listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeFetchPhase() throws Exception {
|
||||
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList(),
|
||||
true);
|
||||
if (reducedQueryPhase.scoreDocs.length == 0) {
|
||||
finishHim(reducedQueryPhase);
|
||||
return;
|
||||
}
|
||||
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), reducedQueryPhase.scoreDocs);
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, queryResults.length());
|
||||
final CountDown counter = new CountDown(docIdsToLoad.length);
|
||||
for (int i = 0; i < docIdsToLoad.length; i++) {
|
||||
final int index = i;
|
||||
final IntArrayList docIds = docIdsToLoad[index];
|
||||
if (docIds != null) {
|
||||
final QuerySearchResult querySearchResult = queryResults.get(index);
|
||||
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[index];
|
||||
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.getRequestId(), docIds, lastEmittedDoc);
|
||||
DiscoveryNode node = nodes.get(querySearchResult.getSearchShardTarget().getNodeId());
|
||||
searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task,
|
||||
new SearchActionListener<FetchSearchResult>(querySearchResult.getSearchShardTarget(), index) {
|
||||
@Override
|
||||
protected void innerOnResponse(FetchSearchResult response) {
|
||||
fetchResults.setOnce(response.getShardIndex(), response);
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
onShardFailure(getName(), counter, querySearchResult.getRequestId(),
|
||||
t, querySearchResult.getSearchShardTarget(),
|
||||
() -> sendResponsePhase(reducedQueryPhase, fetchResults));
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// the counter is set to the total size of docIdsToLoad
|
||||
// which can have null values so we have to count them down too
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
sendResponse(reducedQueryPhase, fetchResults);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Failed to execute fetch phase", t);
|
||||
}
|
||||
successfulOps.decrementAndGet();
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// the counter is set to the total size of docIdsToLoad which can have null values so we have to count them down too
|
||||
if (counter.countDown()) {
|
||||
finishHim(reducedQueryPhase);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private void finishHim(SearchPhaseController.ReducedQueryPhase queryPhase) {
|
||||
try {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase, fetchResults.asList(),
|
||||
fetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
}
|
||||
listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
|
||||
buildTookInMillis(), buildShardFailures()));
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
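The fetch phase above sizes its countdown to the whole docIdsToLoad array, so slots with nothing to fetch still have to be counted down or the response would never be sent. A standalone sketch of that detail, assuming plain Java only (not the Elasticsearch code):

import java.util.concurrent.atomic.AtomicInteger;

final class NullSlotCountDownSketch {
    public static void main(String[] args) {
        int[][] docIdsToLoad = { {1, 2}, null, {7} }; // the middle shard contributes no hits
        AtomicInteger counter = new AtomicInteger(docIdsToLoad.length);
        int fetched = 0;
        for (int[] docIds : docIdsToLoad) {
            if (docIds != null) {
                fetched += docIds.length; // in the real action this happens in the async fetch listener
            }
            // null slots must be counted down too, otherwise the response is never sent
            if (counter.decrementAndGet() == 0) {
                System.out.println("fetched " + fetched + " docs -> send response");
            }
        }
    }
}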
@@ -98,14 +98,14 @@ public class SearchTransportService extends AbstractComponent {
}, SearchFreeContextResponse::new));
|
||||
}
|
||||
|
||||
public void sendFreeContext(DiscoveryNode node, long contextId, final ActionListener<SearchFreeContextResponse> listener) {
|
||||
transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId),
|
||||
new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new));
|
||||
public void sendFreeContext(Transport.Connection connection, long contextId, final ActionListener<SearchFreeContextResponse> listener) {
|
||||
transportService.sendRequest(connection, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId),
|
||||
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new));
|
||||
}
|
||||
|
||||
public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
|
||||
transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
|
||||
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
|
||||
public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener<TransportResponse> listener) {
|
||||
transportService.sendRequest(connection, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
|
||||
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
|
||||
}
|
||||
|
||||
public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
|
||||
|
@@ -145,15 +145,15 @@ public class SearchTransportService extends AbstractComponent {
new ActionListenerResponseHandler<>(listener, QuerySearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteScrollQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
public void sendExecuteScrollQuery(Transport.Connection connection, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final SearchActionListener<ScrollQuerySearchResult> listener) {
|
||||
transportService.sendChildRequest(transportService.getConnection(node), QUERY_SCROLL_ACTION_NAME, request, task,
|
||||
transportService.sendChildRequest(connection, QUERY_SCROLL_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new));
|
||||
}
|
||||
|
||||
public void sendExecuteScrollFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
|
||||
public void sendExecuteScrollFetch(Transport.Connection connection, final InternalScrollSearchRequest request, SearchTask task,
|
||||
final SearchActionListener<ScrollQueryFetchSearchResult> listener) {
|
||||
transportService.sendChildRequest(transportService.getConnection(node), QUERY_FETCH_SCROLL_ACTION_NAME, request, task,
|
||||
transportService.sendChildRequest(connection, QUERY_FETCH_SCROLL_ACTION_NAME, request, task,
|
||||
new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new));
|
||||
}
|
||||
|
||||
|
@@ -162,9 +162,9 @@ public class SearchTransportService extends AbstractComponent {
sendExecuteFetch(connection, FETCH_ID_ACTION_NAME, request, task, listener);
|
||||
}
|
||||
|
||||
public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, SearchTask task,
|
||||
public void sendExecuteFetchScroll(Transport.Connection connection, final ShardFetchRequest request, SearchTask task,
|
||||
final SearchActionListener<FetchSearchResult> listener) {
|
||||
sendExecuteFetch(transportService.getConnection(node), FETCH_ID_SCROLL_ACTION_NAME, request, task, listener);
|
||||
sendExecuteFetch(connection, FETCH_ID_SCROLL_ACTION_NAME, request, task, listener);
|
||||
}
|
||||
|
||||
private void sendExecuteFetch(Transport.Connection connection, String action, final ShardFetchRequest request, SearchTask task,
|
||||
|
|
|
@@ -38,8 +38,6 @@ import org.elasticsearch.search.SearchShardTarget;
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
|
||||
|
||||
/**
|
||||
* Represents a failure to search on a specific shard.
|
||||
|
@@ -200,16 +198,16 @@ public class ShardSearchFailure implements ShardOperationFailedException {
} else if (NODE_FIELD.equals(currentFieldName)) {
|
||||
nodeId = parser.text();
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (REASON_FIELD.equals(currentFieldName)) {
|
||||
exception = ElasticsearchException.fromXContent(parser);
|
||||
} else {
|
||||
throwUnknownField(currentFieldName, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else {
|
||||
throwUnknownToken(token, parser.getTokenLocation());
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
return new ShardSearchFailure(exception,
|
||||
|
|
|
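The hunk above replaces the strict throwUnknownField/throwUnknownToken calls with parser.skipChildren(), so a shard failure carrying fields this node does not know about is skipped over instead of rejected. Below is a minimal, self-contained sketch of the same lenient-parsing idea, written with Jackson rather than the XContentParser API used in the diff; the "node" field name mirrors NODE_FIELD, while the class name and JSON sample are invented for illustration.

// Lenient parsing sketch (Jackson, not the Elasticsearch XContentParser): skip unknown fields instead of failing.
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import java.io.IOException;

public class LenientFailureParser {
    public static String parseNode(String json) throws IOException {
        String nodeId = null;
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken(); // START_OBJECT
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken(); // move to the field's value
                if ("node".equals(field)) {
                    nodeId = parser.getText();
                } else {
                    // unknown field: skip its value (including nested objects/arrays) instead of throwing
                    parser.skipChildren();
                }
            }
        }
        return nodeId;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(parseNode("{\"node\":\"n1\",\"new_field\":{\"x\":1}}")); // prints n1
    }
}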
@ -19,30 +19,16 @@

package org.elasticsearch.action.search;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {

private final ClusterService clusterService;

@ -53,105 +39,16 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;
}

@Override
protected void doExecute(ClearScrollRequest request, final ActionListener<ClearScrollResponse> listener) {
new Async(request, listener, clusterService.state()).run();
}

private class Async {
final DiscoveryNodes nodes;
final CountDown expectedOps;
final List<ScrollIdForNode[]> contexts = new ArrayList<>();
final ActionListener<ClearScrollResponse> listener;
final AtomicReference<Throwable> expHolder;
final AtomicInteger numberOfFreedSearchContexts = new AtomicInteger(0);

private Async(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener, ClusterState clusterState) {
int expectedOps = 0;
this.nodes = clusterState.nodes();
if (request.getScrollIds().size() == 1 && "_all".equals(request.getScrollIds().get(0))) {
expectedOps = nodes.getSize();
} else {
for (String parsedScrollId : request.getScrollIds()) {
ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext();
expectedOps += context.length;
this.contexts.add(context);
}
}
this.listener = listener;
this.expHolder = new AtomicReference<>();
this.expectedOps = new CountDown(expectedOps);
}

public void run() {
if (expectedOps.isCountedDown()) {
listener.onResponse(new ClearScrollResponse(true, 0));
return;
}

if (contexts.isEmpty()) {
for (final DiscoveryNode node : nodes) {
searchTransportService.sendClearAllScrollContexts(node, new ActionListener<TransportResponse>() {
@Override
public void onResponse(TransportResponse response) {
onFreedContext(true);
}

@Override
public void onFailure(Exception e) {
onFailedFreedContext(e, node);
}
});
}
} else {
for (ScrollIdForNode[] context : contexts) {
for (ScrollIdForNode target : context) {
final DiscoveryNode node = nodes.get(target.getNode());
if (node == null) {
onFreedContext(false);
continue;
}

searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener<SearchTransportService.SearchFreeContextResponse>() {
@Override
public void onResponse(SearchTransportService.SearchFreeContextResponse freed) {
onFreedContext(freed.isFreed());
}

@Override
public void onFailure(Exception e) {
onFailedFreedContext(e, node);
}
});
}
}
}
}

void onFreedContext(boolean freed) {
if (freed) {
numberOfFreedSearchContexts.incrementAndGet();
}
if (expectedOps.countDown()) {
boolean succeeded = expHolder.get() == null;
listener.onResponse(new ClearScrollResponse(succeeded, numberOfFreedSearchContexts.get()));
}
}

void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get()));
} else {
expHolder.set(e);
}
}

Runnable runnable = new ClearScrollController(request, listener, clusterService.state().nodes(), logger, searchTransportService);
runnable.run();
}

}

@ -23,7 +23,9 @@ import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.transport.RemoteClusterAware;

import java.io.IOException;
import java.util.Base64;

@ -40,7 +42,13 @@ final class TransportSearchHelper {
out.writeVInt(searchPhaseResults.asList().size());
for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) {
out.writeLong(searchPhaseResult.getRequestId());
out.writeString(searchPhaseResult.getSearchShardTarget().getNodeId());
SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget();
if (searchShardTarget.getClusterAlias() != null) {
out.writeString(RemoteClusterAware.buildRemoteIndexName(searchShardTarget.getClusterAlias(),
searchShardTarget.getNodeId()));
} else {
out.writeString(searchShardTarget.getNodeId());
}
}
byte[] bytes = new byte[(int) out.getFilePointer()];
out.writeTo(bytes, 0);

@ -57,7 +65,15 @@ final class TransportSearchHelper {
for (int i = 0; i < context.length; ++i) {
long id = in.readLong();
String target = in.readString();
context[i] = new ScrollIdForNode(target, id);
String clusterAlias;
final int index = target.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR);
if (index == -1) {
clusterAlias = null;
} else {
clusterAlias = target.substring(0, index);
target = target.substring(index+1);
}
context[i] = new ScrollIdForNode(clusterAlias, target, id);
}
if (in.getPosition() != bytes.length) {
throw new IllegalArgumentException("Not all bytes were read");

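The two hunks above change how a scroll id records where each per-shard context lives: for cross-cluster results the node id is written as the cluster alias joined to the node id via RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR, and parsing splits the alias back off. A standalone sketch of that round trip, assuming ':' as the separator character (an assumption of this sketch, not something the diff states):

// Encodes/decodes "<clusterAlias>:<nodeId>" vs plain "<nodeId>", mirroring the diff's read/write logic.
final class ScrollTargetCoding {
    private static final char SEPARATOR = ':'; // assumed separator value, for illustration only

    static String encode(String clusterAlias, String nodeId) {
        return clusterAlias != null ? clusterAlias + SEPARATOR + nodeId : nodeId;
    }

    static String[] decode(String target) {
        int index = target.indexOf(SEPARATOR);
        if (index == -1) {
            return new String[] { null, target };            // local cluster: no alias prefix
        }
        return new String[] { target.substring(0, index),    // cluster alias
                              target.substring(index + 1) }; // node id
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(decode(encode("cluster_one", "node-3")))); // [cluster_one, node-3]
        System.out.println(java.util.Arrays.toString(decode(encode(null, "node-3"))));          // [null, node-3]
    }
}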
@ -60,7 +60,7 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
protected void doExecute(Task task, SearchScrollRequest request, ActionListener<SearchResponse> listener) {
try {
ParsedScrollId scrollId = parseScrollId(request.scrollId());
AbstractAsyncAction action;
Runnable action;
switch (scrollId.getType()) {
case QUERY_THEN_FETCH_TYPE:
action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService,

@ -73,7 +73,7 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
default:
throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
}
action.start();
action.run();
} catch (Exception e) {
listener.onFailure(e);
}

@ -37,7 +37,7 @@ import java.util.concurrent.atomic.AtomicReference;
public final class GroupedActionListener<T> implements ActionListener<T> {
private final CountDown countDown;
private final AtomicInteger pos = new AtomicInteger();
private final AtomicArray<T> roles;
private final AtomicArray<T> results;
private final ActionListener<Collection<T>> delegate;
private final Collection<T> defaults;
private final AtomicReference<Exception> failure = new AtomicReference<>();

@ -49,7 +49,7 @@ public final class GroupedActionListener<T> implements ActionListener<T> {
*/
public GroupedActionListener(ActionListener<Collection<T>> delegate, int groupSize,
Collection<T> defaults) {
roles = new AtomicArray<>(groupSize);
results = new AtomicArray<>(groupSize);
countDown = new CountDown(groupSize);
this.delegate = delegate;
this.defaults = defaults;

@ -57,12 +57,12 @@ public final class GroupedActionListener<T> implements ActionListener<T> {
@Override
public void onResponse(T element) {
roles.set(pos.incrementAndGet() - 1, element);
results.setOnce(pos.incrementAndGet() - 1, element);
if (countDown.countDown()) {
if (failure.get() != null) {
delegate.onFailure(failure.get());
} else {
List<T> collect = this.roles.asList();
List<T> collect = this.results.asList();
collect.addAll(defaults);
delegate.onResponse(Collections.unmodifiableList(collect));
}

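The GroupedActionListener hunks rename the slot array from roles to results and switch to setOnce(), which trips if the same slot is ever written twice. A self-contained sketch of the underlying pattern with plain JDK types (assumed names, not the Elasticsearch classes): each caller claims a slot, and only the final count-down delivers the combined, unmodifiable list.

// Minimal grouped-listener pattern: N results land in fixed slots, the last one triggers delivery.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Consumer;

final class SimpleGroupedListener<T> {
    private final AtomicReferenceArray<T> results;
    private final AtomicInteger pos = new AtomicInteger();
    private final AtomicInteger remaining;
    private final Consumer<List<T>> onAllDone;

    SimpleGroupedListener(int groupSize, Consumer<List<T>> onAllDone) {
        this.results = new AtomicReferenceArray<>(groupSize);
        this.remaining = new AtomicInteger(groupSize);
        this.onAllDone = onAllDone;
    }

    void onResponse(T element) {
        int slot = pos.getAndIncrement();
        if (results.compareAndSet(slot, null, element) == false) {
            throw new IllegalStateException("slot " + slot + " was already set"); // the "set once" guarantee
        }
        if (remaining.decrementAndGet() == 0) {
            List<T> collected = new ArrayList<>();
            for (int i = 0; i < results.length(); i++) {
                collected.add(results.get(i));
            }
            onAllDone.accept(Collections.unmodifiableList(collected));
        }
    }

    public static void main(String[] args) {
        SimpleGroupedListener<String> listener = new SimpleGroupedListener<>(2, System.out::println);
        listener.onResponse("a");
        listener.onResponse("b"); // prints [a, b]
    }
}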
@ -19,6 +19,7 @@

package org.elasticsearch.action.support;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestRequest;

@ -43,6 +44,7 @@ public class IndicesOptions {
private static final byte EXPAND_WILDCARDS_CLOSED = 8;
private static final byte FORBID_ALIASES_TO_MULTIPLE_INDICES = 16;
private static final byte FORBID_CLOSED_INDICES = 32;
private static final byte IGNORE_ALIASES = 64;

private static final byte STRICT_EXPAND_OPEN = 6;
private static final byte LENIENT_EXPAND_OPEN = 7;

@ -51,10 +53,10 @@ public class IndicesOptions {
private static final byte STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = 48;

static {
byte max = 1 << 6;
short max = 1 << 7;
VALUES = new IndicesOptions[max];
for (byte id = 0; id < max; id++) {
VALUES[id] = new IndicesOptions(id);
for (short id = 0; id < max; id++) {
VALUES[id] = new IndicesOptions((byte)id);
}
}

@ -106,18 +108,31 @@ public class IndicesOptions {
* @return whether aliases pointing to multiple indices are allowed
*/
public boolean allowAliasesToMultipleIndices() {
//true is default here, for bw comp we keep the first 16 values
//in the array same as before + the default value for the new flag
// true is default here, for bw comp we keep the first 16 values
// in the array same as before + the default value for the new flag
return (id & FORBID_ALIASES_TO_MULTIPLE_INDICES) == 0;
}

/**
* @return whether aliases should be ignored (when resolving a wildcard)
*/
public boolean ignoreAliases() {
return (id & IGNORE_ALIASES) != 0;
}

public void writeIndicesOptions(StreamOutput out) throws IOException {
out.write(id);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
out.write(id);
} else {
// if we are talking to a node that doesn't support the newly added flag (ignoreAliases)
// flip to 0 all the bits starting from the 7th
out.write(id & 0x3f);
}
}

public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException {
//if we read from a node that doesn't support the newly added flag (allowAliasesToMultipleIndices)
//we just receive the old corresponding value with the new flag set to true (default)
//if we read from a node that doesn't support the newly added flag (ignoreAliases)
//we just receive the old corresponding value with the new flag set to false (default)
byte id = in.readByte();
if (id >= VALUES.length) {
throw new IllegalArgumentException("No valid missing index type id: " + id);

@ -133,8 +148,16 @@ public class IndicesOptions {
return fromOptions(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices, defaultOptions.allowAliasesToMultipleIndices(), defaultOptions.forbidClosedIndices());
}

static IndicesOptions fromOptions(boolean ignoreUnavailable, boolean allowNoIndices, boolean expandToOpenIndices, boolean expandToClosedIndices, boolean allowAliasesToMultipleIndices, boolean forbidClosedIndices) {
byte id = toByte(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices, allowAliasesToMultipleIndices, forbidClosedIndices);
public static IndicesOptions fromOptions(boolean ignoreUnavailable, boolean allowNoIndices, boolean expandToOpenIndices,
boolean expandToClosedIndices, boolean allowAliasesToMultipleIndices, boolean forbidClosedIndices) {
return fromOptions(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices, allowAliasesToMultipleIndices,
forbidClosedIndices, false);
}

public static IndicesOptions fromOptions(boolean ignoreUnavailable, boolean allowNoIndices, boolean expandToOpenIndices,
boolean expandToClosedIndices, boolean allowAliasesToMultipleIndices, boolean forbidClosedIndices, boolean ignoreAliases) {
byte id = toByte(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices, allowAliasesToMultipleIndices,
forbidClosedIndices, ignoreAliases);
return VALUES[id];
}

@ -246,7 +269,7 @@ public class IndicesOptions {
}

private static byte toByte(boolean ignoreUnavailable, boolean allowNoIndices, boolean wildcardExpandToOpen,
boolean wildcardExpandToClosed, boolean allowAliasesToMultipleIndices, boolean forbidClosedIndices) {
boolean wildcardExpandToClosed, boolean allowAliasesToMultipleIndices, boolean forbidClosedIndices, boolean ignoreAliases) {
byte id = 0;
if (ignoreUnavailable) {
id |= IGNORE_UNAVAILABLE;

@ -268,6 +291,9 @@ public class IndicesOptions {
if (forbidClosedIndices) {
id |= FORBID_CLOSED_INDICES;
}
if (ignoreAliases) {
id |= IGNORE_ALIASES;
}
return id;
}

@ -281,6 +307,7 @@ public class IndicesOptions {
", expand_wildcards_closed=" + expandWildcardsClosed() +
", allow_aliases_to_multiple_indices=" + allowAliasesToMultipleIndices() +
", forbid_closed_indices=" + forbidClosedIndices() +
", ignore_aliases=" + ignoreAliases() +
']';
}
}

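IndicesOptions stores every option as one bit of a single byte; the hunks above add IGNORE_ALIASES as the seventh bit (64), grow the VALUES cache from 1 << 6 to 1 << 7 entries, and mask the byte with 0x3f when writing to a node older than 6.0.0-alpha2 so the unknown bit is dropped. A small illustrative class (not the Elasticsearch implementation) showing how the packing and the backward-compatibility mask interact, using only flag values that appear in the hunks:

// Bit-flag packing sketch: set bits to encode options, test bits to read them, mask to drop new bits on old wires.
final class OptionsBits {
    static final byte EXPAND_WILDCARDS_CLOSED = 8;
    static final byte FORBID_ALIASES_TO_MULTIPLE_INDICES = 16;
    static final byte FORBID_CLOSED_INDICES = 32;
    static final byte IGNORE_ALIASES = 64; // the new 7th flag; the VALUES array must grow from 1 << 6 to 1 << 7

    static byte toByte(boolean forbidClosedIndices, boolean ignoreAliases) {
        byte id = 0;
        if (forbidClosedIndices) {
            id |= FORBID_CLOSED_INDICES;
        }
        if (ignoreAliases) {
            id |= IGNORE_ALIASES;
        }
        return id;
    }

    static boolean ignoreAliases(byte id) {
        return (id & IGNORE_ALIASES) != 0;
    }

    public static void main(String[] args) {
        byte id = toByte(true, true);
        System.out.println(ignoreAliases(id));                  // true
        System.out.println(ignoreAliases((byte) (id & 0x3f)));  // false: the old-wire mask drops the new bit
    }
}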
@ -106,16 +106,11 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
final List<NodeResponse> responses = new ArrayList<>();
final List<FailedNodeException> failures = new ArrayList<>();

final boolean accumulateExceptions = accumulateExceptions();
for (int i = 0; i < nodesResponses.length(); ++i) {
Object response = nodesResponses.get(i);

if (response instanceof FailedNodeException) {
if (accumulateExceptions) {
failures.add((FailedNodeException)response);
} else {
logger.warn("not accumulating exceptions, excluding exception from response", (FailedNodeException)response);
}
failures.add((FailedNodeException)response);
} else {
responses.add(nodeResponseClass.cast(response));
}

@ -145,8 +140,6 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
return nodeOperation(request);
}

protected abstract boolean accumulateExceptions();

/**
* resolve node ids to concrete nodes of the incoming request
**/

@ -20,6 +20,7 @@ package org.elasticsearch.action.support.replication;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;

@ -185,7 +186,15 @@ public class ReplicationOperation<
@Override
public void onResponse(ReplicaResponse response) {
successfulShards.incrementAndGet();
primary.updateLocalCheckpointForShard(response.allocationId(), response.localCheckpoint());
try {
primary.updateLocalCheckpointForShard(response.allocationId(), response.localCheckpoint());
} catch (final AlreadyClosedException e) {
// okay, the index was deleted or this shard was never activated after a relocation; fall through and finish normally
} catch (final Exception e) {
// fail the primary but fall through and let the rest of operation processing complete
final String message = String.format(Locale.ROOT, "primary failed updating local checkpoint for replica %s", shard);
primary.failShard(message, e);
}
decPendingAndFinishIfNeeded();
}

@ -321,7 +330,10 @@ public class ReplicationOperation<
ShardRouting routingEntry();

/**
* fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master
* Fail the primary shard.
*
* @param message the failure message
* @param exception the exception that triggered the failure
*/
void failShard(String message, Exception exception);

@ -335,7 +347,6 @@ public class ReplicationOperation<
*/
PrimaryResultT perform(RequestT request) throws Exception;

/**
* Notifies the primary of a local checkpoint for the given allocation.
*

@ -226,8 +226,6 @@ public abstract class TransportTasksAction<
return false;
}

protected abstract boolean accumulateExceptions();

private class AsyncAction {

private final TasksRequest request;

@ -321,9 +319,9 @@ public abstract class TransportTasksAction<
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
}
if (accumulateExceptions()) {
responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
}

responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));

if (counter.incrementAndGet() == responses.length()) {
finishHim();
}

@ -334,10 +334,6 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj
return new TimeValue(tookInMillis);
}

public long getTookInMillis() {
return tookInMillis;
}

private void buildScore(XContentBuilder builder, BoostAttribute boostAtt) throws IOException {
if (hasScores) {
builder.field(FieldStrings.SCORE, boostAtt.getBoost());

@ -140,7 +140,7 @@ public class JarHell {
URL url = PathUtils.get(element).toUri().toURL();
if (urlElements.add(url) == false) {
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"duplicate jar on classpath: " + classPath);
"duplicate jar [" + element + "] on classpath: " + classPath);
}
} catch (MalformedURLException e) {
// should not happen, as we use the filesystem API

@ -46,6 +46,7 @@ import java.security.Policy;
import java.security.URIParameter;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

@ -262,8 +263,22 @@ final class Security {
if (environment.sharedDataFile() != null) {
addPath(policy, Environment.PATH_SHARED_DATA_SETTING.getKey(), environment.sharedDataFile(), "read,readlink,write,delete");
}
final Set<Path> dataFilesPaths = new HashSet<>();
for (Path path : environment.dataFiles()) {
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
/*
* We have to do this after adding the path because a side effect of that is that the directory is created; the Path#toRealPath
* invocation will fail if the directory does not already exist. We use Path#toRealPath to follow symlinks and handle issues
* like unicode normalization or case-insensitivity on some filesystems (e.g., the case-insensitive variant of HFS+ on macOS).
*/
try {
final Path realPath = path.toRealPath();
if (!dataFilesPaths.add(realPath)) {
throw new IllegalStateException("path [" + realPath + "] is duplicated by [" + path + "]");
}
} catch (final IOException e) {
throw new IllegalStateException("unable to access [" + path + "]", e);
}
}
/*
* If path.data and default.path.data are set, we need read access to the paths in default.path.data to check for the existence of

@ -392,11 +407,12 @@ final class Security {
}

/**
* Add access to path (and all files underneath it)
* @param policy current policy to add permissions to
* Add access to path (and all files underneath it); this also creates the directory if it does not exist.
*
* @param policy current policy to add permissions to
* @param configurationName the configuration name associated with the path (for error messages only)
* @param path the path itself
* @param permissions set of file permissions to grant to the path
* @param path the path itself
* @param permissions set of file permissions to grant to the path
*/
static void addPath(Permissions policy, String configurationName, Path path, String permissions) {
// paths may not exist yet, this also checks accessibility

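The Security hunk above rejects duplicate data paths by resolving each entry with Path#toRealPath(), which follows symlinks and normalizes case or unicode differences, and then checking the resolved path against a set. A runnable sketch of that check using only java.nio (directory names are placeholders, not Elasticsearch defaults):

// Duplicate-path detection sketch: two spellings of the same directory resolve to one real path.
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

final class DataPathCheck {
    static void ensureNoDuplicates(Path... dataPaths) {
        final Set<Path> seen = new HashSet<>();
        for (Path path : dataPaths) {
            try {
                final Path realPath = path.toRealPath(); // fails if the directory does not exist yet
                if (seen.add(realPath) == false) {
                    throw new IllegalStateException("path [" + realPath + "] is duplicated by [" + path + "]");
                }
            } catch (IOException e) {
                throw new IllegalStateException("unable to access [" + path + "]", e);
            }
        }
    }

    public static void main(String[] args) {
        // "/tmp" and "/tmp/." resolve to the same real path, so this throws IllegalStateException
        ensureNoDuplicates(Paths.get("/tmp"), Paths.get("/tmp/."));
    }
}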
@ -45,6 +45,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;

@ -265,8 +268,37 @@ public interface ClusterAdminClient extends ElasticsearchClient {
NodesStatsRequestBuilder prepareNodesStats(String... nodesIds);

/**
* Returns top N hot-threads samples per node. The hot-threads are only sampled
* for the node ids specified in the request.
* Returns top N hot-threads samples per node. The hot-threads are only
* sampled for the node ids specified in the request. Nodes usage of the
* cluster.
*
* @param request
* The nodes usage request
* @return The result future
* @see org.elasticsearch.client.Requests#nodesUsageRequest(String...)
*/
ActionFuture<NodesUsageResponse> nodesUsage(NodesUsageRequest request);

/**
* Nodes usage of the cluster.
*
* @param request
* The nodes usage request
* @param listener
* A listener to be notified with a result
* @see org.elasticsearch.client.Requests#nodesUsageRequest(String...)
*/
void nodesUsage(NodesUsageRequest request, ActionListener<NodesUsageResponse> listener);

/**
* Nodes usage of the cluster.
*/
NodesUsageRequestBuilder prepareNodesUsage(String... nodesIds);

/**
* Returns top N hot-threads samples per node. The hot-threads are only
* sampled for the node ids specified in the request.
*
*/
ActionFuture<NodesHotThreadsResponse> nodesHotThreads(NodesHotThreadsRequest request);

@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;

@ -387,6 +388,19 @@ public class Requests {
return new NodesStatsRequest(nodesIds);
}

/**
* Creates a nodes usage request against one or more nodes. Pass
* <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds
* The nodes ids to get the usage for
* @return The nodes usage request
* @see org.elasticsearch.client.ClusterAdminClient#nodesUsage(org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest)
*/
public static NodesUsageRequest nodesUsageRequest(String... nodesIds) {
return new NodesUsageRequest(nodesIds);
}

/**
* Creates a cluster stats request.
*

@ -57,6 +57,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;

@ -828,6 +832,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new NodesStatsRequestBuilder(this, NodesStatsAction.INSTANCE).setNodesIds(nodesIds);
}

@Override
public ActionFuture<NodesUsageResponse> nodesUsage(final NodesUsageRequest request) {
return execute(NodesUsageAction.INSTANCE, request);
}

@Override
public void nodesUsage(final NodesUsageRequest request, final ActionListener<NodesUsageResponse> listener) {
execute(NodesUsageAction.INSTANCE, request, listener);
}

@Override
public NodesUsageRequestBuilder prepareNodesUsage(String... nodesIds) {
return new NodesUsageRequestBuilder(this, NodesUsageAction.INSTANCE).setNodesIds(nodesIds);
}

@Override
public ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request) {
return execute(ClusterStatsAction.INSTANCE, request);

@ -45,8 +45,8 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;

@ -159,7 +159,7 @@ public abstract class TransportClient extends AbstractClient {
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getIndexScopedSettings(),
settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool,
pluginsService.filterPlugins(ActionPlugin.class), null, null);
pluginsService.filterPlugins(ActionPlugin.class), null, null, null);
modules.add(actionModule);

CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),

@ -375,7 +375,7 @@ public class ShardStateAction extends AbstractComponent {
public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
logger.debug("{} received shard started for [{}]", request.shardId, request);
clusterService.submitStateUpdateTask(
"shard-started",
"shard-started " + request,
request,
ClusterStateTaskConfig.build(Priority.URGENT),
shardStartedClusterStateTaskExecutor,

@ -50,6 +50,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.function.Predicate;
import java.util.stream.Collectors;

@ -104,7 +105,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return concreteIndexNames(context, indexExpressions);
}

/**
/**
* Translates the provided index expression into actual concrete indices, properly deduplicated.
*
* @param state the cluster state containing all the data to resolve to expressions to concrete indices

@ -181,7 +182,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
final Set<Index> concreteIndices = new HashSet<>(expressions.size());
for (String expression : expressions) {
AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression);
if (aliasOrIndex == null) {
if (aliasOrIndex == null || (aliasOrIndex.isAlias() && context.getOptions().ignoreAliases())) {
if (failNoIndices) {
IndexNotFoundException infe = new IndexNotFoundException(expression);
infe.setResources("index_expression", expression);

@ -638,7 +639,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
}

final IndexMetaData.State excludeState = excludeState(options);
final Map<String, AliasOrIndex> matches = matches(metaData, expression);
final Map<String, AliasOrIndex> matches = matches(context, metaData, expression);
Set<String> expand = expand(context, excludeState, matches);
if (add) {
result.addAll(expand);

@ -693,31 +694,44 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return excludeState;
}

private static Map<String, AliasOrIndex> matches(MetaData metaData, String expression) {
public static Map<String, AliasOrIndex> matches(Context context, MetaData metaData, String expression) {
if (Regex.isMatchAllPattern(expression)) {
// Can only happen if the expressions was initially: '-*'
return metaData.getAliasAndIndexLookup();
if (context.getOptions().ignoreAliases()) {
return metaData.getAliasAndIndexLookup().entrySet().stream()
.filter(e -> e.getValue().isAlias() == false)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
} else {
return metaData.getAliasAndIndexLookup();
}
} else if (expression.indexOf("*") == expression.length() - 1) {
return suffixWildcard(metaData, expression);
return suffixWildcard(context, metaData, expression);
} else {
return otherWildcard(metaData, expression);
return otherWildcard(context, metaData, expression);
}
}

private static Map<String, AliasOrIndex> suffixWildcard(MetaData metaData, String expression) {
private static Map<String, AliasOrIndex> suffixWildcard(Context context, MetaData metaData, String expression) {
assert expression.length() >= 2 : "expression [" + expression + "] should have at least a length of 2";
String fromPrefix = expression.substring(0, expression.length() - 1);
char[] toPrefixCharArr = fromPrefix.toCharArray();
toPrefixCharArr[toPrefixCharArr.length - 1]++;
String toPrefix = new String(toPrefixCharArr);
return metaData.getAliasAndIndexLookup().subMap(fromPrefix, toPrefix);
SortedMap<String,AliasOrIndex> subMap = metaData.getAliasAndIndexLookup().subMap(fromPrefix, toPrefix);
if (context.getOptions().ignoreAliases()) {
return subMap.entrySet().stream()
.filter(entry -> entry.getValue().isAlias() == false)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
return subMap;
}

private static Map<String, AliasOrIndex> otherWildcard(MetaData metaData, String expression) {
private static Map<String, AliasOrIndex> otherWildcard(Context context, MetaData metaData, String expression) {
final String pattern = expression;
return metaData.getAliasAndIndexLookup()
.entrySet()
.stream()
.filter(e -> context.getOptions().ignoreAliases() == false || e.getValue().isAlias() == false)
.filter(e -> Regex.simpleMatch(pattern, e.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}

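suffixWildcard() above resolves an expression like "logs-*" without scanning every name: it takes the sorted alias-and-index lookup, asks for the sub-map between the prefix and the prefix with its last character incremented, and then, when ignoreAliases() is set, filters aliases out of the result. A self-contained sketch of the same trick on a TreeMap, with booleans standing in for AliasOrIndex#isAlias() (all names invented for the demo):

// Prefix-range lookup sketch: subMap("logs-", "logs.") covers every key that starts with "logs-".
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;

final class SuffixWildcard {
    static Map<String, Boolean> matches(TreeMap<String, Boolean> lookup, String expression, boolean ignoreAliases) {
        String fromPrefix = expression.substring(0, expression.length() - 1); // strip the trailing '*'
        char[] toPrefixCharArr = fromPrefix.toCharArray();
        toPrefixCharArr[toPrefixCharArr.length - 1]++;                        // smallest string just past the prefix range
        String toPrefix = new String(toPrefixCharArr);
        SortedMap<String, Boolean> subMap = lookup.subMap(fromPrefix, toPrefix);
        if (ignoreAliases) {
            // keep only concrete indices (value false = "not an alias" in this toy model)
            return subMap.entrySet().stream()
                .filter(entry -> entry.getValue() == false)
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        }
        return subMap;
    }

    public static void main(String[] args) {
        TreeMap<String, Boolean> lookup = new TreeMap<>();
        lookup.put("logs-2017", false);   // concrete index
        lookup.put("logs-alias", true);   // alias
        lookup.put("metrics-2017", false);
        System.out.println(matches(lookup, "logs-*", true).keySet());  // [logs-2017]
        System.out.println(matches(lookup, "logs-*", false).keySet()); // [logs-2017, logs-alias]
    }
}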
@ -243,7 +243,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
*
* @param aliases The names of the index aliases to find
* @param concreteIndices The concrete indexes the index aliases must point to order to be returned.
* @return the found index aliases grouped by index
* @return a map of index to a list of alias metadata, the list corresponding to a concrete index will be empty if no aliases are
* present for that index
*/
public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final String[] aliases, String[] concreteIndices) {
assert aliases != null;

@ -273,8 +274,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return o1.alias().compareTo(o2.alias());
}
});
mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
}
mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
}
return mapBuilder.build();
}

Some files were not shown because too many files have changed in this diff.