Merge branch 'master' into feature/rank-eval
commit cf35545e2d

@@ -165,8 +165,6 @@ end
def sles_common(config)
extra = <<-SHELL
zypper rr systemsmanagement_puppet puppetlabs-pc1
zypper addrepo -t yast2 http://demeter.uni-regensburg.de/SLES12-x64/DVD1/ dvd1 || true
zypper --no-gpg-checks --non-interactive refresh
zypper --non-interactive install git-core
SHELL
suse_common config, extra
@@ -193,7 +191,15 @@ def provision(config,
raise ArgumentError.new('update_command is required') if update_command == 'required'
raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
raise ArgumentError.new('install_command is required') if install_command == 'required'
config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
config.vm.provider "virtualbox" do |v|
# Give the box more memory and cpu because our tests are beasts!
v.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192)
v.cpus = Integer(ENV['VAGRANT_CPUS'] || 4)
end
config.vm.synced_folder "#{Dir.home}/.gradle/caches", "/home/vagrant/.gradle/caches",
create: true,
owner: "vagrant"
config.vm.provision "dependencies", type: "shell", inline: <<-SHELL
set -e
set -o pipefail
@@ -256,6 +262,18 @@ def provision(config,
/tmp/bats/install.sh /usr
rm -rf /tmp/bats
}

installed gradle || {
echo "==> Installing Gradle"
curl -sS -o /tmp/gradle.zip -L https://services.gradle.org/distributions/gradle-3.3-bin.zip
unzip /tmp/gradle.zip -d /opt
rm -rf /tmp/gradle.zip
ln -s /opt/gradle-3.3/bin/gradle /usr/bin/gradle
# make nfs mounted gradle home dir writeable
chown vagrant:vagrant /home/vagrant/.gradle
}

cat \<\<VARS > /etc/profile.d/elasticsearch_vars.sh
export ZIP=/elasticsearch/distribution/zip/build/distributions
export TAR=/elasticsearch/distribution/tar/build/distributions
@@ -265,6 +283,7 @@ export BATS=/project/build/bats
export BATS_UTILS=/project/build/bats/utils
export BATS_TESTS=/project/build/bats/tests
export BATS_ARCHIVES=/project/build/bats/archives
export GRADLE_HOME=/opt/gradle-3.3
VARS
cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
Defaults env_keep += "ZIP"
@@ -67,6 +67,10 @@ class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactory
@Input
String ifNoTests = 'ignore'

@Optional
@Input
String onNonEmptyWorkDirectory = 'fail'

TestLoggingConfiguration testLoggingConfig = new TestLoggingConfiguration()

BalancersConfiguration balancersConfig = new BalancersConfiguration(task: this)

@@ -193,6 +197,7 @@ class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactory
shuffleOnSlave: shuffleOnSlave,
leaveTemporary: leaveTemporary,
ifNoTests: ifNoTests,
onNonEmptyWorkDirectory: onNonEmptyWorkDirectory,
newenvironment: true
]
@@ -20,6 +20,7 @@ package org.elasticsearch.gradle

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
@@ -202,14 +203,28 @@ class BuildPlugin implements Plugin<Project> {

/** Runs the given javascript using jjs from the jdk, and returns the output */
private static String runJavascript(Project project, String javaHome, String script) {
ByteArrayOutputStream output = new ByteArrayOutputStream()
project.exec {
executable = new File(javaHome, 'bin/jrunscript')
args '-e', script
standardOutput = output
errorOutput = new ByteArrayOutputStream()
ByteArrayOutputStream stdout = new ByteArrayOutputStream()
ByteArrayOutputStream stderr = new ByteArrayOutputStream()
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
// gradle/groovy does not properly escape the double quote for windows
script = script.replace('"', '\\"')
}
return output.toString('UTF-8').trim()
File jrunscriptPath = new File(javaHome, 'bin/jrunscript')
ExecResult result = project.exec {
executable = jrunscriptPath
args '-e', script
standardOutput = stdout
errorOutput = stderr
ignoreExitValue = true
}
if (result.exitValue != 0) {
project.logger.error("STDOUT:")
stdout.toString('UTF-8').eachLine { line -> project.logger.error(line) }
project.logger.error("STDERR:")
stderr.toString('UTF-8').eachLine { line -> project.logger.error(line) }
result.rethrowFailure()
}
return stdout.toString('UTF-8').trim()
}

/** Return the configuration name used for finding transitive deps of the given dependency. */
@@ -464,7 +479,7 @@ class BuildPlugin implements Plugin<Project> {
'Build-Java-Version': project.javaVersion)
if (jarTask.manifest.attributes.containsKey('Change') == false) {
logger.warn('Building without git revision id.')
jarTask.manifest.attributes('Change': 'N/A')
jarTask.manifest.attributes('Change': 'Unknown')
}
}
}

@@ -476,6 +491,7 @@ class BuildPlugin implements Plugin<Project> {
jvm "${project.javaHome}/bin/java"
parallelism System.getProperty('tests.jvms', 'auto')
ifNoTests 'fail'
onNonEmptyWorkDirectory 'wipe'
leaveTemporary true

// TODO: why are we not passing maxmemory to junit4?
@ -22,6 +22,7 @@ package org.elasticsearch.gradle
|
|||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.artifacts.Configuration
|
||||
import org.gradle.api.tasks.InputFile
|
||||
import org.gradle.api.tasks.OutputFile
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
|
||||
|
@ -30,8 +31,11 @@ import org.gradle.api.tasks.TaskAction
|
|||
*/
|
||||
public class NoticeTask extends DefaultTask {
|
||||
|
||||
@InputFile
|
||||
File inputFile = project.rootProject.file('NOTICE.txt')
|
||||
|
||||
@OutputFile
|
||||
File noticeFile = new File(project.buildDir, "notices/${name}/NOTICE.txt")
|
||||
File outputFile = new File(project.buildDir, "notices/${name}/NOTICE.txt")
|
||||
|
||||
/** Configurations to inspect dependencies*/
|
||||
private List<Project> dependencies = new ArrayList<>()
|
||||
|
@ -48,7 +52,7 @@ public class NoticeTask extends DefaultTask {
|
|||
@TaskAction
|
||||
public void generateNotice() {
|
||||
StringBuilder output = new StringBuilder()
|
||||
output.append(project.rootProject.file('NOTICE.txt').getText('UTF-8'))
|
||||
output.append(inputFile.getText('UTF-8'))
|
||||
output.append('\n\n')
|
||||
Set<String> seen = new HashSet<>()
|
||||
for (Project dep : dependencies) {
|
||||
|
@ -61,7 +65,7 @@ public class NoticeTask extends DefaultTask {
|
|||
seen.add(file.name)
|
||||
}
|
||||
}
|
||||
noticeFile.setText(output.toString(), 'UTF-8')
|
||||
outputFile.setText(output.toString(), 'UTF-8')
|
||||
}
|
||||
|
||||
static void appendFile(File file, String name, String type, StringBuilder output) {
|
||||
|
|
|
@ -257,10 +257,11 @@ public class PluginBuildPlugin extends BuildPlugin {
|
|||
include(licenseFile.name)
|
||||
}
|
||||
}
|
||||
File noticeFile = project.pluginProperties.extension.licenseFile
|
||||
File noticeFile = project.pluginProperties.extension.noticeFile
|
||||
if (noticeFile != null) {
|
||||
NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class)
|
||||
generateNotice.dependencies(project)
|
||||
generateNotice.inputFile = noticeFile
|
||||
project.bundlePlugin.from(generateNotice)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -86,6 +86,9 @@ public class DependencyLicensesTask extends DefaultTask {
|
|||
/** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */
|
||||
private LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
|
||||
|
||||
/** Names of dependencies whose shas should not exist. */
|
||||
private Set<String> ignoreShas = new HashSet<>()
|
||||
|
||||
/**
|
||||
* Add a mapping from a regex pattern for the jar name, to a prefix to find
|
||||
* the LICENSE and NOTICE file for that jar.
|
||||
|
@ -106,6 +109,15 @@ public class DependencyLicensesTask extends DefaultTask {
|
|||
mappings.put(from, to)
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a rule which will skip SHA checking for the given dependency name. This should be used for
|
||||
* locally build dependencies, which cause the sha to change constantly.
|
||||
*/
|
||||
@Input
|
||||
public void ignoreSha(String dep) {
|
||||
ignoreShas.add(dep)
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
public void checkDependencies() {
|
||||
if (dependencies.isEmpty()) {
|
||||
|
@ -139,19 +151,27 @@ public class DependencyLicensesTask extends DefaultTask {
|
|||
|
||||
for (File dependency : dependencies) {
|
||||
String jarName = dependency.getName()
|
||||
logger.info("Checking license/notice/sha for " + jarName)
|
||||
checkSha(dependency, jarName, shaFiles)
|
||||
String depName = jarName - ~/\-\d+.*/
|
||||
if (ignoreShas.contains(depName)) {
|
||||
// local deps should not have sha files!
|
||||
if (getShaFile(jarName).exists()) {
|
||||
throw new GradleException("SHA file ${getShaFile(jarName)} exists for ignored dependency ${depName}")
|
||||
}
|
||||
} else {
|
||||
logger.info("Checking sha for " + jarName)
|
||||
checkSha(dependency, jarName, shaFiles)
|
||||
}
|
||||
|
||||
String name = jarName - ~/\-\d+.*/
|
||||
Matcher match = mappingsPattern.matcher(name)
|
||||
logger.info("Checking license/notice for " + depName)
|
||||
Matcher match = mappingsPattern.matcher(depName)
|
||||
if (match.matches()) {
|
||||
int i = 0
|
||||
while (i < match.groupCount() && match.group(i + 1) == null) ++i;
|
||||
logger.info("Mapped dependency name ${name} to ${mapped.get(i)} for license check")
|
||||
name = mapped.get(i)
|
||||
logger.info("Mapped dependency name ${depName} to ${mapped.get(i)} for license check")
|
||||
depName = mapped.get(i)
|
||||
}
|
||||
checkFile(name, jarName, licenses, 'LICENSE')
|
||||
checkFile(name, jarName, notices, 'NOTICE')
|
||||
checkFile(depName, jarName, licenses, 'LICENSE')
|
||||
checkFile(depName, jarName, notices, 'NOTICE')
|
||||
}
|
||||
|
||||
licenses.each { license, count ->
|
||||
|
@ -169,8 +189,12 @@ public class DependencyLicensesTask extends DefaultTask {
|
|||
}
|
||||
}
|
||||
|
||||
private File getShaFile(String jarName) {
|
||||
return new File(licensesDir, jarName + SHA_EXTENSION)
|
||||
}
|
||||
|
||||
private void checkSha(File jar, String jarName, Set<File> shaFiles) {
|
||||
File shaFile = new File(licensesDir, jarName + SHA_EXTENSION)
|
||||
File shaFile = getShaFile(jarName)
|
||||
if (shaFile.exists() == false) {
|
||||
throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateSHAs' to create")
|
||||
}
|
||||
|
@ -215,6 +239,10 @@ public class DependencyLicensesTask extends DefaultTask {
|
|||
}
|
||||
for (File dependency : parentTask.dependencies) {
|
||||
String jarName = dependency.getName()
|
||||
String depName = jarName - ~/\-\d+.*/
|
||||
if (parentTask.ignoreShas.contains(depName)) {
|
||||
continue
|
||||
}
|
||||
File shaFile = new File(parentTask.licensesDir, jarName + SHA_EXTENSION)
|
||||
if (shaFile.exists() == false) {
|
||||
logger.lifecycle("Adding sha for ${jarName}")
|
||||
|
|
|
@ -41,6 +41,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
|
||||
private static final BATS = 'bats'
|
||||
private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
|
||||
private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && \$GRADLE_HOME/bin/gradle test integTest"
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
|
@ -327,6 +328,17 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
|
||||
private static void createPlatformTestTask(Project project) {
|
||||
project.tasks.create('platformTest') {
|
||||
group 'Verification'
|
||||
description "Test unit and integ tests on different platforms using vagrant.\n" +
|
||||
" Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" +
|
||||
" 'all' can be used to test all available boxes. The available boxes are: \n" +
|
||||
" ${BOXES}"
|
||||
dependsOn 'vagrantCheckVersion'
|
||||
}
|
||||
}
|
||||
|
||||
private static void createVagrantTasks(Project project) {
|
||||
createCleanTask(project)
|
||||
createStopTask(project)
|
||||
|
@ -337,6 +349,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
createCheckVirtualBoxVersionTask(project)
|
||||
createPrepareVagrantTestEnvTask(project)
|
||||
createPackagingTestTask(project)
|
||||
createPlatformTestTask(project)
|
||||
}
|
||||
|
||||
private static void createVagrantBoxesTasks(Project project) {
|
||||
|
@ -360,6 +373,9 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
assert project.tasks.packagingTest != null
|
||||
Task packagingTest = project.tasks.packagingTest
|
||||
|
||||
assert project.tasks.platformTest != null
|
||||
Task platformTest = project.tasks.platformTest
|
||||
|
||||
/*
|
||||
* We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD)
|
||||
* so that boxes are not duplicated for every Gradle project that use this VagrantTestPlugin.
|
||||
|
@ -425,7 +441,8 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
finalizedBy halt
|
||||
command BATS_TEST_COMMAND
|
||||
}
|
||||
TaskExecutionAdapter reproduceListener = new TaskExecutionAdapter() {
|
||||
|
||||
TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() {
|
||||
@Override
|
||||
void afterExecute(Task task, TaskState state) {
|
||||
if (state.failure != null) {
|
||||
|
@ -435,15 +452,40 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
packaging.doFirst {
|
||||
project.gradle.addListener(reproduceListener)
|
||||
project.gradle.addListener(packagingReproListener)
|
||||
}
|
||||
packaging.doLast {
|
||||
project.gradle.removeListener(reproduceListener)
|
||||
project.gradle.removeListener(packagingReproListener)
|
||||
}
|
||||
|
||||
if (project.extensions.esvagrant.boxes.contains(box)) {
|
||||
packagingTest.dependsOn(packaging)
|
||||
}
|
||||
|
||||
Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
|
||||
boxName box
|
||||
environmentVars vagrantEnvVars
|
||||
dependsOn up
|
||||
finalizedBy halt
|
||||
args 'ssh', boxName, '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}"
|
||||
}
|
||||
TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() {
|
||||
@Override
|
||||
void afterExecute(Task task, TaskState state) {
|
||||
if (state.failure != null) {
|
||||
println "REPRODUCE WITH: gradle ${platform.path} " +
|
||||
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
|
||||
}
|
||||
}
|
||||
}
|
||||
packaging.doFirst {
|
||||
project.gradle.addListener(platformReproListener)
|
||||
}
|
||||
packaging.doLast {
|
||||
project.gradle.removeListener(platformReproListener)
|
||||
}
|
||||
if (project.extensions.esvagrant.boxes.contains(box)) {
|
||||
platformTest.dependsOn(platform)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -159,7 +159,6 @@
|
|||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]SuggestResponse.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]DelegatingActionListener.java" checks="LineLength" />
|
||||
|
@ -437,18 +436,14 @@
|
|||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]InternalSingleBucketAggregation.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]children[/\\]ChildrenParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]children[/\\]ParentToChildrenAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filter[/\\]InternalFilter.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filters[/\\]FiltersAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filters[/\\]FiltersParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filters[/\\]InternalFilters.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]geogrid[/\\]GeoHashGridAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]histogram[/\\]HistogramAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]InternalMissing.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]MissingAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
|
||||
|
|
|
@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
lucene = 6.5.0-snapshot-f919485
lucene = 6.5.0-snapshot-d00c5ca

# optional dependencies
spatial4j = 0.6

@@ -13,7 +13,7 @@ slf4j = 1.6.2
jna = 4.2.2

# test dependencies
randomizedrunner = 2.4.0
randomizedrunner = 2.5.0
junit = 4.11
httpclient = 4.5.2
# When updating httpcore, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
@ -29,8 +29,8 @@ import org.apache.http.entity.ByteArrayEntity;
|
|||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
|
@ -97,6 +97,10 @@ final class Request {
|
|||
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
}
|
||||
|
||||
static Request info() {
|
||||
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
}
|
||||
|
||||
static Request bulk(BulkRequest bulkRequest) throws IOException {
|
||||
Params parameters = Params.builder();
|
||||
parameters.withTimeout(bulkRequest.timeout());
|
||||
|
@ -264,7 +268,7 @@ final class Request {
|
|||
}
|
||||
|
||||
static Request ping() {
|
||||
return new Request("HEAD", "/", Collections.emptyMap(), null);
|
||||
return new Request(HttpHead.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
}
|
||||
|
||||
static Request update(UpdateRequest updateRequest) throws IOException {
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.elasticsearch.action.get.GetResponse;
|
|||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.main.MainRequest;
|
||||
import org.elasticsearch.action.main.MainResponse;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.action.update.UpdateResponse;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
|
@ -113,6 +114,14 @@ public class RestHighLevelClient {
|
|||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cluster info otherwise provided when sending an HTTP request to port 9200
|
||||
*/
|
||||
public MainResponse info(Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(),
|
||||
headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a document by id using the Get API
|
||||
*
|
||||
|
|
|
@ -588,10 +588,10 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType();
|
||||
if (requestOpType == DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) {
|
||||
assertEquals(errors[i], bulkItemResponse.isFailed());
|
||||
assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.CREATED, bulkItemResponse.status());
|
||||
assertEquals(errors[i] ? RestStatus.CONFLICT : RestStatus.CREATED, bulkItemResponse.status());
|
||||
} else if (requestOpType == DocWriteRequest.OpType.UPDATE) {
|
||||
assertEquals(errors[i], bulkItemResponse.isFailed());
|
||||
assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.OK, bulkItemResponse.status());
|
||||
assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
|
||||
} else if (requestOpType == DocWriteRequest.OpType.DELETE) {
|
||||
assertFalse(bulkItemResponse.isFailed());
|
||||
assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
|
||||
|
|
|
@ -41,7 +41,7 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
|
|||
}
|
||||
|
||||
@AfterClass
|
||||
public static void cleanupClient() throws IOException {
|
||||
public static void cleanupClient() {
|
||||
restHighLevelClient = null;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,10 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.action.main.MainResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
|
@ -27,5 +30,22 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
|
|||
assertTrue(highLevelClient().ping());
|
||||
}
|
||||
|
||||
//TODO add here integ tests for info api: "GET /" once we have parsing code for MainResponse
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testInfo() throws IOException {
|
||||
MainResponse info = highLevelClient().info();
|
||||
// compare with what the low level client outputs
|
||||
Map<String, Object> infoAsMap = entityAsMap(adminClient().performRequest("GET", "/"));
|
||||
assertEquals(infoAsMap.get("cluster_name"), info.getClusterName().value());
|
||||
assertEquals(infoAsMap.get("cluster_uuid"), info.getClusterUuid());
|
||||
|
||||
// only check node name existence, might be a different one from what was hit by low level client in multi-node cluster
|
||||
assertNotNull(info.getNodeName());
|
||||
Map<String, Object> versionMap = (Map<String, Object>) infoAsMap.get("version");
|
||||
assertEquals(versionMap.get("build_hash"), info.getBuild().shortHash());
|
||||
assertEquals(versionMap.get("build_date"), info.getBuild().date());
|
||||
assertEquals(versionMap.get("build_snapshot"), info.getBuild().isSnapshot());
|
||||
assertEquals(versionMap.get("number"), info.getVersion().toString());
|
||||
assertEquals(versionMap.get("lucene_version"), info.getVersion().luceneVersion.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -66,6 +66,14 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals("HEAD", request.method);
|
||||
}
|
||||
|
||||
public void testInfo() {
|
||||
Request request = Request.info();
|
||||
assertEquals("/", request.endpoint);
|
||||
assertEquals(0, request.params.size());
|
||||
assertNull(request.entity);
|
||||
assertEquals("GET", request.method);
|
||||
}
|
||||
|
||||
public void testGet() {
|
||||
getAndExistsTest(Request::get, "GET");
|
||||
}
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonParseException;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
|
@ -33,15 +34,20 @@ import org.apache.http.entity.StringEntity;
|
|||
import org.apache.http.message.BasicHttpResponse;
|
||||
import org.apache.http.message.BasicRequestLine;
|
||||
import org.apache.http.message.BasicStatusLine;
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.main.MainRequest;
|
||||
import org.elasticsearch.action.main.MainResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.cbor.CborXContent;
|
||||
import org.elasticsearch.common.xcontent.smile.SmileXContent;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
@ -59,6 +65,7 @@ import java.util.List;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.mockito.Matchers.anyMapOf;
|
||||
import static org.mockito.Matchers.anyObject;
|
||||
|
@ -79,7 +86,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
private RestHighLevelClient restHighLevelClient;
|
||||
|
||||
@Before
|
||||
public void initClient() throws IOException {
|
||||
public void initClient() {
|
||||
restClient = mock(RestClient.class);
|
||||
restHighLevelClient = new RestHighLevelClient(restClient);
|
||||
}
|
||||
|
@ -115,6 +122,21 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
public void testInfo() throws IOException {
|
||||
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
|
||||
Response response = mock(Response.class);
|
||||
MainResponse testInfo = new MainResponse("nodeName", Version.CURRENT, new ClusterName("clusterName"), "clusterUuid",
|
||||
Build.CURRENT, true);
|
||||
when(response.getEntity()).thenReturn(
|
||||
new StringEntity(toXContent(testInfo, XContentType.JSON, false).utf8ToString(), ContentType.APPLICATION_JSON));
|
||||
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
|
||||
anyObject(), anyVararg())).thenReturn(response);
|
||||
MainResponse receivedInfo = restHighLevelClient.info(headers);
|
||||
assertEquals(testInfo, receivedInfo);
|
||||
verify(restClient).performRequest(eq("GET"), eq("/"), eq(Collections.emptyMap()),
|
||||
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
|
||||
}
|
||||
|
||||
public void testRequestValidation() {
|
||||
ActionRequestValidationException validationException = new ActionRequestValidationException();
|
||||
validationException.addValidationError("validation error");
|
||||
|
@ -388,7 +410,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
|
||||
}
|
||||
|
||||
public void testWrapResponseListenerOnSuccess() throws IOException {
|
||||
public void testWrapResponseListenerOnSuccess() {
|
||||
{
|
||||
TrackingActionListener trackingActionListener = new TrackingActionListener();
|
||||
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
|
||||
|
@ -414,7 +436,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testWrapResponseListenerOnException() throws IOException {
|
||||
public void testWrapResponseListenerOnException() {
|
||||
TrackingActionListener trackingActionListener = new TrackingActionListener();
|
||||
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
|
||||
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
|
||||
|
@ -543,7 +565,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
|
||||
}
|
||||
|
||||
public void testNamedXContents() throws IOException {
|
||||
public void testNamedXContents() {
|
||||
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getNamedXContents();
|
||||
assertEquals(0, namedXContents.size());
|
||||
}
|
||||
|
|
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.delete.DeleteResponse;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Delete API documentation.
|
||||
* You need to wrap your code between two tags like:
|
||||
* // tag::example[]
|
||||
* // end::example[]
|
||||
*
|
||||
* Where example is your tag name.
|
||||
*
|
||||
* Then in the documentation, you can extract what is between tag and end tags with
|
||||
* ["source","java",subs="attributes,callouts"]
|
||||
* --------------------------------------------------
|
||||
* sys2::[perl -ne 'exit if /end::example/; print if $tag; $tag = $tag || /tag::example/' \
|
||||
* {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java]
|
||||
* --------------------------------------------------
|
||||
*/
|
||||
public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
/**
|
||||
* This test documents docs/java-rest/high-level/document/delete.asciidoc
|
||||
*/
|
||||
public void testDelete() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::delete-request[]
|
||||
DeleteRequest request = new DeleteRequest(
|
||||
"index", // <1>
|
||||
"type", // <2>
|
||||
"id"); // <3>
|
||||
// end::delete-request[]
|
||||
|
||||
// tag::delete-request-props[]
|
||||
request.timeout(TimeValue.timeValueSeconds(1)); // <1>
|
||||
request.timeout("1s"); // <2>
|
||||
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <3>
|
||||
request.setRefreshPolicy("wait_for"); // <4>
|
||||
request.version(2); // <5>
|
||||
request.versionType(VersionType.EXTERNAL); // <6>
|
||||
// end::delete-request-props[]
|
||||
|
||||
// tag::delete-execute[]
|
||||
DeleteResponse response = client.delete(request);
|
||||
// end::delete-execute[]
|
||||
|
||||
try {
|
||||
// tag::delete-notfound[]
|
||||
if (response.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) {
|
||||
throw new Exception("Can't find document to be removed"); // <1>
|
||||
}
|
||||
// end::delete-notfound[]
|
||||
} catch (Exception ignored) { }
|
||||
|
||||
// tag::delete-execute-async[]
|
||||
client.deleteAsync(request, new ActionListener<DeleteResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteResponse deleteResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
// end::delete-execute-async[]
|
||||
|
||||
// tag::delete-conflict[]
|
||||
try {
|
||||
client.delete(request);
|
||||
} catch (ElasticsearchException exception) {
|
||||
if (exception.status().equals(RestStatus.CONFLICT)) {
|
||||
// <1>
|
||||
}
|
||||
}
|
||||
// end::delete-conflict[]
|
||||
|
||||
}
|
||||
}
|
|
@ -289,40 +289,44 @@ public class RestClient implements Closeable {
|
|||
public void performRequestAsync(String method, String endpoint, Map<String, String> params,
|
||||
HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
|
||||
ResponseListener responseListener, Header... headers) {
|
||||
Objects.requireNonNull(params, "params must not be null");
|
||||
Map<String, String> requestParams = new HashMap<>(params);
|
||||
//ignore is a special parameter supported by the clients, shouldn't be sent to es
|
||||
String ignoreString = requestParams.remove("ignore");
|
||||
Set<Integer> ignoreErrorCodes;
|
||||
if (ignoreString == null) {
|
||||
if (HttpHead.METHOD_NAME.equals(method)) {
|
||||
//404 never causes error if returned for a HEAD request
|
||||
ignoreErrorCodes = Collections.singleton(404);
|
||||
try {
|
||||
Objects.requireNonNull(params, "params must not be null");
|
||||
Map<String, String> requestParams = new HashMap<>(params);
|
||||
//ignore is a special parameter supported by the clients, shouldn't be sent to es
|
||||
String ignoreString = requestParams.remove("ignore");
|
||||
Set<Integer> ignoreErrorCodes;
|
||||
if (ignoreString == null) {
|
||||
if (HttpHead.METHOD_NAME.equals(method)) {
|
||||
//404 never causes error if returned for a HEAD request
|
||||
ignoreErrorCodes = Collections.singleton(404);
|
||||
} else {
|
||||
ignoreErrorCodes = Collections.emptySet();
|
||||
}
|
||||
} else {
|
||||
ignoreErrorCodes = Collections.emptySet();
|
||||
}
|
||||
} else {
|
||||
String[] ignoresArray = ignoreString.split(",");
|
||||
ignoreErrorCodes = new HashSet<>();
|
||||
if (HttpHead.METHOD_NAME.equals(method)) {
|
||||
//404 never causes error if returned for a HEAD request
|
||||
ignoreErrorCodes.add(404);
|
||||
}
|
||||
for (String ignoreCode : ignoresArray) {
|
||||
try {
|
||||
ignoreErrorCodes.add(Integer.valueOf(ignoreCode));
|
||||
} catch (NumberFormatException e) {
|
||||
throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e);
|
||||
String[] ignoresArray = ignoreString.split(",");
|
||||
ignoreErrorCodes = new HashSet<>();
|
||||
if (HttpHead.METHOD_NAME.equals(method)) {
|
||||
//404 never causes error if returned for a HEAD request
|
||||
ignoreErrorCodes.add(404);
|
||||
}
|
||||
for (String ignoreCode : ignoresArray) {
|
||||
try {
|
||||
ignoreErrorCodes.add(Integer.valueOf(ignoreCode));
|
||||
} catch (NumberFormatException e) {
|
||||
throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
URI uri = buildUri(pathPrefix, endpoint, requestParams);
|
||||
HttpRequestBase request = createHttpRequest(method, uri, entity);
|
||||
setHeaders(request, headers);
|
||||
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
|
||||
long startTime = System.nanoTime();
|
||||
performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
|
||||
failureTrackingResponseListener);
|
||||
} catch (Exception e) {
|
||||
responseListener.onFailure(e);
|
||||
}
|
||||
URI uri = buildUri(pathPrefix, endpoint, requestParams);
|
||||
HttpRequestBase request = createHttpRequest(method, uri, entity);
|
||||
setHeaders(request, headers);
|
||||
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
|
||||
long startTime = System.nanoTime();
|
||||
performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
|
||||
failureTrackingResponseListener);
|
||||
}
|
||||
|
||||
private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class RestClientTests extends RestClientTestCase {
|
||||
|
||||
public void testPerformAsyncWithUnsupportedMethod() throws Exception {
|
||||
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.performRequestAsync("unsupported", randomAsciiOfLength(5), listener);
|
||||
listener.get();
|
||||
|
||||
fail("should have failed because of unsupported method");
|
||||
} catch (UnsupportedOperationException exception) {
|
||||
assertEquals("http method not supported: unsupported", exception.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testPerformAsyncWithNullParams() throws Exception {
|
||||
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.performRequestAsync(randomAsciiOfLength(5), randomAsciiOfLength(5), null, listener);
|
||||
listener.get();
|
||||
|
||||
fail("should have failed because of null parameters");
|
||||
} catch (NullPointerException exception) {
|
||||
assertEquals("params must not be null", exception.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testPerformAsyncWithNullHeaders() throws Exception {
|
||||
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.performRequestAsync("GET", randomAsciiOfLength(5), listener, (Header) null);
|
||||
listener.get();
|
||||
|
||||
fail("should have failed because of null headers");
|
||||
} catch (NullPointerException exception) {
|
||||
assertEquals("request header must not be null", exception.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testPerformAsyncWithWrongEndpoint() throws Exception {
|
||||
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.performRequestAsync("GET", "::http:///", listener);
|
||||
listener.get();
|
||||
|
||||
fail("should have failed because of wrong endpoint");
|
||||
} catch (IllegalArgumentException exception) {
|
||||
assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private static RestClient createRestClient() {
|
||||
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
|
||||
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
|
||||
}
|
||||
}
@@ -0,0 +1 @@
9ad2a7bd252cbdb76ac121287e670d75f4db2cd3
@@ -1 +0,0 @@
886c1da9adc3347f61ab95ecbf4dbeeaa0e7acb2
@@ -0,0 +1 @@
c6a940eff8a87df40262b752ed7b135e448b7873
@@ -1 +0,0 @@
df9e94f63ad7d9188f14820c435ea1dc3c28d87a
@@ -0,0 +1 @@
6ef5ad88141760c00ea041da1535f3ffc364d67d
@@ -1 +0,0 @@
3539f8dc9c3ed8ebe90afcb3daa2e9afcf5108d1
@@ -0,0 +1 @@
f15775571fb5762dfc92e00c3909cb8db8ff1d53
@@ -1 +0,0 @@
da76338e4f299963da9d7ab33dae7586dfc902c2
@@ -0,0 +1 @@
051d793aa64257beead4ccc7432eb5df81d17f23
@@ -1 +0,0 @@
f6318d120236c7ac03fca6bf98825b4cb4347fc8
@@ -0,0 +1 @@
5bc4cba55670c14ea812ff5de65edad4c312fdf6
@@ -1 +0,0 @@
68f045ff272e10c307fe25a1867c2948b614b57c
@@ -0,0 +1 @@
68cf08bcd8414a57493debf3a6a509d78a9abb56
@@ -1 +0,0 @@
b58a7a15267614a9a14f7cf6257454e0c24b146d
@@ -0,0 +1 @@
f5d90756dbeda1218d723b7bea0799c88d621adb
@@ -1 +0,0 @@
d5f00fcd00fee6906b563d201bc00bdea7a92baa
@@ -0,0 +1 @@
9298e7d1ed96e7beb63d7ccdce1a4502eb0fe484
@@ -1 +0,0 @@
2664901a494d87e9f4cef65be14cca918da7c4f5
@@ -0,0 +1 @@
918de18963607af69dff38e4773c0bde89c73ae3
@@ -1 +0,0 @@
476a79293f9a15ea1ee5f93684587205d03480d1
@@ -0,0 +1 @@
a311a7d9f3e9a8fbf3a367a4e2731f9d4579732b
@@ -1 +0,0 @@
f4dd70223178cca067b0cade4e58c4d82bec87d6
@@ -0,0 +1 @@
693bc4cb0e2e4465e0173c67ed0818071c4b460b
@@ -1 +0,0 @@
72c4ec5d811480164db556b54c7a76bd3ea16bd6
@@ -0,0 +1 @@
0326f31e63c76d476c23488c7354265cf915350f
@@ -1 +0,0 @@
f7af3755fdd09df7c258c655aff03ddef9536a04
@@ -0,0 +1 @@
69a3a86e9d045f872408793ea411d49e0c577268
@@ -1 +0,0 @@
2bf820109203b990e93a05dade8dcebec6aeb71a
@@ -0,0 +1 @@
fabc05ca175150171cf60370877276b933716bcd
@@ -1 +0,0 @@
fc1f32923ee68761ee05051f4ef6f4a4ab3acdec
@ -19,8 +19,10 @@
|
|||
|
||||
package org.apache.lucene.queryparser.classic;
|
||||
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.WildcardQuery;
|
||||
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
|
||||
import org.elasticsearch.index.query.ExistsQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
||||
|
@ -30,6 +32,13 @@ public class ExistsFieldQueryExtension implements FieldQueryExtension {
|
|||
|
||||
@Override
|
||||
public Query query(QueryShardContext context, String queryText) {
|
||||
return new ConstantScoreQuery(ExistsQueryBuilder.newFilter(context, queryText));
|
||||
final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType =
|
||||
(FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
|
||||
if (fieldNamesFieldType.isEnabled() == false) {
|
||||
// The field_names_field is disabled so we switch to a wildcard query that matches all terms
|
||||
return new WildcardQuery(new Term(queryText, "*"));
|
||||
}
|
||||
|
||||
return ExistsQueryBuilder.newFilter(context, queryText);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,9 +19,6 @@
|
|||
|
||||
package org.apache.lucene.queryparser.classic;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
|
@ -33,12 +30,14 @@ import org.apache.lucene.search.BooleanQuery;
|
|||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.search.DisjunctionMaxQuery;
|
||||
import org.apache.lucene.search.FuzzyQuery;
|
||||
import org.apache.lucene.search.GraphQuery;
|
||||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.MultiPhraseQuery;
|
||||
import org.apache.lucene.search.PhraseQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.SynonymQuery;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.automaton.RegExp;
|
||||
|
@ -59,6 +58,9 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
|
||||
|
||||
/**
|
||||
* A query parser that uses the {@link MapperService} in order to build smarter
|
||||
* queries based on the mapping information.
|
||||
|
@ -565,22 +567,16 @@ public class MapperQueryParser extends AnalyzingQueryParser {
|
|||
|
||||
@Override
|
||||
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
|
||||
if (termStr.equals("*")) {
|
||||
// we want to optimize for match all query for the "*:*", and "*" cases
|
||||
if ("*".equals(field) || Objects.equals(field, this.field)) {
|
||||
String actualField = field;
|
||||
if (actualField == null) {
|
||||
actualField = this.field;
|
||||
}
|
||||
if (actualField == null) {
|
||||
return newMatchAllDocsQuery();
|
||||
}
|
||||
if ("*".equals(actualField) || "_all".equals(actualField)) {
|
||||
return newMatchAllDocsQuery();
|
||||
}
|
||||
// effectively, we check if a field exists or not
|
||||
return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, actualField);
|
||||
if (termStr.equals("*") && field != null) {
|
||||
if ("*".equals(field)) {
|
||||
return newMatchAllDocsQuery();
|
||||
}
|
||||
String actualField = field;
|
||||
if (actualField == null) {
|
||||
actualField = this.field;
|
||||
}
|
||||
// effectively, we check if a field exists or not
|
||||
return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, actualField);
|
||||
}
|
||||
Collection<String> fields = extractMultiFields(field);
|
||||
if (fields != null) {
|
||||
|
@ -618,6 +614,10 @@ public class MapperQueryParser extends AnalyzingQueryParser {
|
|||
}
|
||||
|
||||
private Query getWildcardQuerySingle(String field, String termStr) throws ParseException {
|
||||
if ("*".equals(termStr)) {
|
||||
// effectively, we check if a field exists or not
|
||||
return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, field);
|
||||
}
|
||||
String indexedNameField = field;
|
||||
currentFieldType = null;
|
||||
Analyzer oldAnalyzer = getAnalyzer();
|
||||
|
@ -747,26 +747,29 @@ public class MapperQueryParser extends AnalyzingQueryParser {
|
|||
MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
|
||||
builder.setSlop(slop);
|
||||
return builder.build();
|
||||
} else if (q instanceof GraphQuery && ((GraphQuery) q).hasPhrase()) {
|
||||
// we have a graph query that has at least one phrase sub-query
|
||||
// re-build and set slop on all phrase queries
|
||||
List<Query> oldQueries = ((GraphQuery) q).getQueries();
|
||||
Query[] queries = new Query[oldQueries.size()];
|
||||
for (int i = 0; i < queries.length; i++) {
|
||||
Query oldQuery = oldQueries.get(i);
|
||||
if (oldQuery instanceof PhraseQuery) {
|
||||
queries[i] = addSlopToPhrase((PhraseQuery) oldQuery, slop);
|
||||
} else {
|
||||
queries[i] = oldQuery;
|
||||
}
|
||||
}
|
||||
|
||||
return new GraphQuery(queries);
|
||||
} else if (q instanceof SpanQuery) {
|
||||
return addSlopToSpan((SpanQuery) q, slop);
|
||||
} else {
|
||||
return q;
|
||||
}
|
||||
}
|
||||
|
||||
private Query addSlopToSpan(SpanQuery query, int slop) {
|
||||
if (query instanceof SpanNearQuery) {
|
||||
return new SpanNearQuery(((SpanNearQuery) query).getClauses(), slop,
|
||||
((SpanNearQuery) query).isInOrder());
|
||||
} else if (query instanceof SpanOrQuery) {
|
||||
SpanQuery[] clauses = new SpanQuery[((SpanOrQuery) query).getClauses().length];
|
||||
int pos = 0;
|
||||
for (SpanQuery clause : ((SpanOrQuery) query).getClauses()) {
|
||||
clauses[pos++] = (SpanQuery) addSlopToSpan(clause, slop);
|
||||
}
|
||||
return new SpanOrQuery(clauses);
|
||||
} else {
|
||||
return query;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rebuild a phrase query with a slop value
|
||||
*/
|
||||
|
|
|
@ -112,6 +112,8 @@ public class Version implements Comparable<Version> {
|
|||
public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
|
||||
public static final int V_5_2_2_ID_UNRELEASED = 5020299;
|
||||
public static final Version V_5_2_2_UNRELEASED = new Version(V_5_2_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
|
||||
public static final int V_5_2_3_ID_UNRELEASED = 5020399;
|
||||
public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
|
||||
public static final int V_5_3_0_ID_UNRELEASED = 5030099;
|
||||
public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
|
||||
public static final int V_5_4_0_ID_UNRELEASED = 5040099;
|
||||
|
@ -138,6 +140,8 @@ public class Version implements Comparable<Version> {
|
|||
return V_6_0_0_alpha1_UNRELEASED;
|
||||
case V_5_3_0_ID_UNRELEASED:
|
||||
return V_5_3_0_UNRELEASED;
|
||||
case V_5_2_3_ID_UNRELEASED:
|
||||
return V_5_2_3_UNRELEASED;
|
||||
case V_5_2_2_ID_UNRELEASED:
|
||||
return V_5_2_2_UNRELEASED;
|
||||
case V_5_2_1_ID_UNRELEASED:
|
||||
|
|
|
@ -88,7 +88,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
|
|||
return status;
|
||||
}
|
||||
|
||||
public Throwable getCause() {
|
||||
public Exception getCause() {
|
||||
return reason;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,40 +19,75 @@

package org.elasticsearch.action.admin.cluster.state;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.io.IOException;

/**
 * The response for getting the cluster state.
 */
public class ClusterStateResponse extends ActionResponse {

    private ClusterName clusterName;
    private ClusterState clusterState;
    // the total compressed size of the full cluster state, not just
    // the parts included in this response
    private ByteSizeValue totalCompressedSize;

    public ClusterStateResponse() {
    }

    public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState) {
    public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState, long sizeInBytes) {
        this.clusterName = clusterName;
        this.clusterState = clusterState;
        this.totalCompressedSize = new ByteSizeValue(sizeInBytes);
    }

    /**
     * The requested cluster state. Only the parts of the cluster state that were
     * requested are included in the returned {@link ClusterState} instance.
     */
    public ClusterState getState() {
        return this.clusterState;
    }

    /**
     * The name of the cluster.
     */
    public ClusterName getClusterName() {
        return this.clusterName;
    }

    /**
     * The total compressed size of the full cluster state, not just the parts
     * returned by {@link #getState()}. The total compressed size is the size
     * of the cluster state as it would be transmitted over the network during
     * intra-node communication.
     */
    public ByteSizeValue getTotalCompressedSize() {
        return totalCompressedSize;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        clusterName = new ClusterName(in);
        clusterState = ClusterState.readFrom(in, null);
        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            totalCompressedSize = new ByteSizeValue(in);
        } else {
            // in a mixed cluster, if a pre 6.0 node processes the get cluster state
            // request, then a compressed size won't be returned, so just return 0;
            // it's a temporary situation until all nodes in the cluster have been upgraded,
            // at which point the correct cluster state size will always be reported
            totalCompressedSize = new ByteSizeValue(0L);
        }
    }

    @Override

@ -60,5 +95,8 @@ public class ClusterStateResponse extends ActionResponse {
        super.writeTo(out);
        clusterName.writeTo(out);
        clusterState.writeTo(out);
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            totalCompressedSize.writeTo(out);
        }
    }
}
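The readFrom/writeTo pair above follows the usual wire backwards-compatibility pattern: the new totalCompressedSize field is only written when the remote node's version knows about it, and readers fall back to a default when talking to an older node. A minimal JDK-only sketch of the same idea, with plain DataOutput/DataInput standing in for the StreamOutput/StreamInput abstractions and an invented version constant:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class VersionGatedIo {
    static final int V_NEW = 6_000_001; // assumed stand-in for the version that introduced the field

    static void write(DataOutputStream out, int peerVersion, long totalCompressedSize) throws IOException {
        out.writeUTF("cluster-name");       // fields every version understands
        if (peerVersion >= V_NEW) {         // only newer peers expect the extra field
            out.writeLong(totalCompressedSize);
        }
    }

    static long read(DataInputStream in, int peerVersion) throws IOException {
        in.readUTF();
        if (peerVersion >= V_NEW) {
            return in.readLong();
        }
        return 0L;                          // older peers never sent it; fall back to a default
    }
}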
@ -20,6 +20,7 @@

package org.elasticsearch.action.admin.cluster.state;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;

@ -36,6 +37,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

import static org.elasticsearch.discovery.zen.PublishClusterStateAction.serializeFullClusterState;

public class TransportClusterStateAction extends TransportMasterNodeReadAction<ClusterStateRequest, ClusterStateResponse> {

@ -66,7 +71,8 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
    }

    @Override
    protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener<ClusterStateResponse> listener) {
    protected void masterOperation(final ClusterStateRequest request, final ClusterState state,
                                   final ActionListener<ClusterStateResponse> listener) throws IOException {
        ClusterState currentState = clusterService.state();
        logger.trace("Serving cluster state request using version {}", currentState.version());
        ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName());

@ -122,7 +128,8 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
        if (request.customs()) {
            builder.customs(currentState.customs());
        }
        listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build()));
        listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build(),
            serializeFullClusterState(currentState, Version.CURRENT).length()));
    }
@ -61,7 +61,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im

    private static ObjectHashSet<String> RESERVED_FIELDS = ObjectHashSet.from(
            "_uid", "_id", "_type", "_source", "_all", "_analyzer", "_parent", "_routing", "_index",
            "_size", "_timestamp", "_ttl"
            "_size", "_timestamp", "_ttl", "_field_names"
    );

    private String[] indices;
@ -122,6 +122,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
            throwUnknownField(currentFieldName, parser.getTokenLocation());
        }

        RestStatus status = null;
        ElasticsearchException exception = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {

@ -132,7 +133,11 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
                if (token == XContentParser.Token.START_OBJECT) {
                    exception = ElasticsearchException.fromXContent(parser);
                }
            } else if (STATUS.equals(currentFieldName) == false) {
            } else if (STATUS.equals(currentFieldName)) {
                if (token == XContentParser.Token.VALUE_NUMBER) {
                    status = RestStatus.fromCode(parser.intValue());
                }
            } else {
                itemParser.accept(parser);
            }
        }

@ -143,7 +148,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {

        BulkItemResponse bulkItemResponse;
        if (exception != null) {
            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception);
            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception, status);
            bulkItemResponse = new BulkItemResponse(id, opType, failure);
        } else {
            bulkItemResponse = new BulkItemResponse(id, opType, builder.build());

@ -167,12 +172,16 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
        private final Exception cause;
        private final RestStatus status;

        public Failure(String index, String type, String id, Exception cause) {
        Failure(String index, String type, String id, Exception cause, RestStatus status) {
            this.index = index;
            this.type = type;
            this.id = id;
            this.cause = cause;
            this.status = ExceptionsHelper.status(cause);
            this.status = status;
        }

        public Failure(String index, String type, String id, Exception cause) {
            this(index, type, id, cause, ExceptionsHelper.status(cause));
        }

        /**
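The Failure change above keeps the old public constructor and routes it through a new package-private one that takes an explicit status, with the old signature deriving its default from the cause. A small self-contained sketch of that telescoping-constructor pattern; the Status enum and deriveStatus helper here are invented for illustration and are not the Elasticsearch types:

class FailureSketch {
    enum Status { BAD_REQUEST, INTERNAL_ERROR }

    final Exception cause;
    final Status status;

    FailureSketch(Exception cause, Status status) {   // primary constructor: everything explicit
        this.cause = cause;
        this.status = status;
    }

    FailureSketch(Exception cause) {                  // old signature delegates, deriving the default
        this(cause, deriveStatus(cause));
    }

    static Status deriveStatus(Exception cause) {
        return cause instanceof IllegalArgumentException ? Status.BAD_REQUEST : Status.INTERNAL_ERROR;
    }
}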
@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;

/**
 * A struct-like holder for a bulk item's response, result, and the resulting
 * replica operation to be executed.
 */
class BulkItemResultHolder {
    public final @Nullable DocWriteResponse response;
    public final @Nullable Engine.Result operationResult;
    public final BulkItemRequest replicaRequest;

    BulkItemResultHolder(@Nullable DocWriteResponse response,
                         @Nullable Engine.Result operationResult,
                         BulkItemRequest replicaRequest) {
        this.response = response;
        this.operationResult = operationResult;
        this.replicaRequest = replicaRequest;
    }
}
@ -0,0 +1,67 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;

import java.util.Objects;

public interface MappingUpdatePerformer {
    /**
     * Determine if any mappings need to be updated, and update them on the
     * master node if necessary. Returns a failed {@code Engine.IndexResult}
     * in the event updating the mappings fails or null if successful.
     * Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the
     * operation needs to be retried on the primary due to the mappings not
     * being present yet, or a different exception if updating the mappings
     * on the master failed.
     */
    @Nullable
    MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception;

    /**
     * Class encapsulating the result of potentially updating the mapping
     */
    class MappingUpdateResult {
        @Nullable
        public final Engine.Index operation;
        @Nullable
        public final Exception failure;

        MappingUpdateResult(Exception failure) {
            Objects.requireNonNull(failure, "failure cannot be null");
            this.failure = failure;
            this.operation = null;
        }

        MappingUpdateResult(Engine.Index operation) {
            Objects.requireNonNull(operation, "operation cannot be null");
            this.operation = operation;
            this.failure = null;
        }

        public boolean isFailed() {
            return failure != null;
        }
    }
}
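MappingUpdateResult above is an either-or holder: exactly one of operation or failure is non-null, and the constructors enforce it with Objects.requireNonNull. A generic, dependency-free sketch of the same pattern might look like this:

import java.util.Objects;

final class Result<T> {
    final T value;            // non-null on success
    final Exception failure;  // non-null on failure

    private Result(T value, Exception failure) {
        this.value = value;
        this.failure = failure;
    }

    static <T> Result<T> success(T value) {
        return new Result<>(Objects.requireNonNull(value, "value cannot be null"), null);
    }

    static <T> Result<T> failed(Exception failure) {
        return new Result<>(null, Objects.requireNonNull(failure, "failure cannot be null"));
    }

    boolean isFailed() {
        return failure != null;
    }
}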
@ -19,6 +19,7 @@

package org.elasticsearch.action.bulk;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;

@ -42,9 +43,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

@ -65,12 +68,16 @@ import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.function.LongSupplier;

/** Performs shard-level bulk (index, delete or update) operations */
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {

    public static final String ACTION_NAME = BulkAction.NAME + "[s]";

    private static final Logger logger = ESLoggerFactory.getLogger(TransportShardBulkAction.class);

    private final UpdateHelper updateHelper;
    private final MappingUpdatedAction mappingUpdatedAction;

@ -105,8 +112,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                                                         BulkShardRequest request, IndexShard primary) throws Exception {
        final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();
        Translog.Location location = null;
        final MappingUpdatePerformer mappingUpdater = new ConcreteMappingUpdatePerformer();
        for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
            location = executeBulkItemRequest(metaData, primary, request, location, requestIndex);
            location = executeBulkItemRequest(metaData, primary, request, location, requestIndex,
                    updateHelper, threadPool::absoluteTimeInMillis, mappingUpdater);
        }
        BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
        BulkItemRequest[] items = request.items();
@ -117,57 +126,56 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        return new WritePrimaryResult<>(request, response, location, null, primary, logger);
    }

    /** Executes bulk item requests and handles request execution exceptions */
    private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
                                                     BulkShardRequest request,
                                                     Translog.Location location, int requestIndex) throws Exception {
        final DocWriteRequest itemRequest = request.items()[requestIndex].request();
        final DocWriteRequest.OpType opType = itemRequest.opType();
        final Engine.Result operationResult;
        final DocWriteResponse response;
        final BulkItemRequest replicaRequest;
        switch (itemRequest.opType()) {
            case CREATE:
            case INDEX:
                final IndexRequest indexRequest = (IndexRequest) itemRequest;
                Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                response = indexResult.hasFailure() ? null :
                        new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
                                indexResult.getVersion(), indexResult.isCreated());
                operationResult = indexResult;
                replicaRequest = request.items()[requestIndex];
                break;
            case UPDATE:
                UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
                        primary, metaData, request, requestIndex);
                operationResult = updateResultHolder.operationResult;
                response = updateResultHolder.response;
                replicaRequest = updateResultHolder.replicaRequest;
                break;
            case DELETE:
                final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
                Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
                response = deleteResult.hasFailure() ? null :
                        new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
                                deleteResult.getVersion(), deleteResult.isFound());
                operationResult = deleteResult;
                replicaRequest = request.items()[requestIndex];
                break;
            default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
        }

        // update the bulk item request because update request execution can mutate the bulk item request
        request.items()[requestIndex] = replicaRequest;
    private static BulkItemResultHolder executeIndexRequest(final IndexRequest indexRequest,
                                                            final BulkItemRequest bulkItemRequest,
                                                            final IndexShard primary,
                                                            final MappingUpdatePerformer mappingUpdater) throws Exception {
        Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
        if (indexResult.hasFailure()) {
            return new BulkItemResultHolder(null, indexResult, bulkItemRequest);
        } else {
            IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(),
                    indexResult.getSeqNo(), indexResult.getVersion(), indexResult.isCreated());
            return new BulkItemResultHolder(response, indexResult, bulkItemRequest);
        }
    }

    private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest deleteRequest,
                                                             final BulkItemRequest bulkItemRequest,
                                                             final IndexShard primary) throws IOException {
        Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
        if (deleteResult.hasFailure()) {
            return new BulkItemResultHolder(null, deleteResult, bulkItemRequest);
        } else {
            DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(),
                    deleteResult.getSeqNo(), deleteResult.getVersion(), deleteResult.isFound());
            return new BulkItemResultHolder(response, deleteResult, bulkItemRequest);
        }
    }

    // Visible for unit testing
    static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResult,
                                                  final DocWriteRequest.OpType opType,
                                                  final Translog.Location originalLocation,
                                                  BulkShardRequest request) {
        final Engine.Result operationResult = bulkItemResult.operationResult;
        final DocWriteResponse response = bulkItemResult.response;
        final BulkItemRequest replicaRequest = bulkItemResult.replicaRequest;

        if (operationResult == null) { // in case of noop update operation
            assert response.getResult() == DocWriteResponse.Result.NOOP
                    : "only noop update can have null operation";
            assert response.getResult() == DocWriteResponse.Result.NOOP : "only noop updates can have a null operation";
            replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
            return originalLocation;

        } else if (operationResult.hasFailure() == false) {
            location = locationToSync(location, operationResult.getTranslogLocation());
            BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
            replicaRequest.setPrimaryResponse(primaryResponse);
            // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
            // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though.
            primaryResponse.getResponse().setShardInfo(new ShardInfo());
            // The operation was successful, advance the translog
            return locationToSync(originalLocation, operationResult.getTranslogLocation());

        } else {
            DocWriteRequest docWriteRequest = replicaRequest.request();
            Exception failure = operationResult.getFailure();

@ -178,15 +186,57 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
                        request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
            }
            // if its a conflict failure, and we already executed the request on a primary (and we execute it

            // if it's a conflict failure, and we already executed the request on a primary (and we execute it
            // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
            // then just use the response we got from the successful execution
            // then just use the response we got from the failed execution
            if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
                replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
                        new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
                replicaRequest.setPrimaryResponse(
                        new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
                                // Make sure to use request.index() here, if you
                                // use docWriteRequest.index() it will use the
                                // concrete index instead of an alias if used!
                                new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
            }
            return originalLocation;
        }
        assert replicaRequest.getPrimaryResponse() != null;
    }

    /** Executes bulk item requests and handles request execution exceptions */
    static Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
                                                    BulkShardRequest request, Translog.Location location,
                                                    int requestIndex, UpdateHelper updateHelper,
                                                    LongSupplier nowInMillisSupplier,
                                                    final MappingUpdatePerformer mappingUpdater) throws Exception {
        final DocWriteRequest itemRequest = request.items()[requestIndex].request();
        final DocWriteRequest.OpType opType = itemRequest.opType();
        final BulkItemResultHolder responseHolder;
        switch (itemRequest.opType()) {
            case CREATE:
            case INDEX:
                responseHolder = executeIndexRequest((IndexRequest) itemRequest,
                        request.items()[requestIndex], primary, mappingUpdater);
                break;
            case UPDATE:
                responseHolder = executeUpdateRequest((UpdateRequest) itemRequest, primary, metaData, request,
                        requestIndex, updateHelper, nowInMillisSupplier, mappingUpdater);
                break;
            case DELETE:
                responseHolder = executeDeleteRequest((DeleteRequest) itemRequest, request.items()[requestIndex], primary);
                break;
            default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
        }

        final BulkItemRequest replicaRequest = responseHolder.replicaRequest;

        // update the bulk item request because update request execution can mutate the bulk item request
        request.items()[requestIndex] = replicaRequest;

        // Modify the replica request, if needed, and return a new translog location
        location = updateReplicaRequest(responseHolder, opType, location, request);

        assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response";
        return location;
    }
@ -194,29 +244,18 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        return ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException;
    }

    private static class UpdateResultHolder {
        final BulkItemRequest replicaRequest;
        final Engine.Result operationResult;
        final DocWriteResponse response;

        private UpdateResultHolder(BulkItemRequest replicaRequest, Engine.Result operationResult,
                                   DocWriteResponse response) {
            this.replicaRequest = replicaRequest;
            this.operationResult = operationResult;
            this.response = response;
        }
    }

    /**
     * Executes update request, delegating to an index or delete operation after translation,
     * handles retries on version conflict and constructs update response
     * NOTE: reassigns bulk item request at <code>requestIndex</code> for replicas to
     * execute translated update request (NOOP update is an exception). NOOP updates are
     * indicated by returning a <code>null</code> operation in {@link UpdateResultHolder}
     * indicated by returning a <code>null</code> operation in {@link BulkItemResultHolder}
     * */
    private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
                                                    IndexMetaData metaData, BulkShardRequest request,
                                                    int requestIndex) throws Exception {
    private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
                                                             IndexMetaData metaData, BulkShardRequest request,
                                                             int requestIndex, UpdateHelper updateHelper,
                                                             LongSupplier nowInMillis,
                                                             final MappingUpdatePerformer mappingUpdater) throws Exception {
        Engine.Result updateOperationResult = null;
        UpdateResponse updateResponse = null;
        BulkItemRequest replicaRequest = request.items()[requestIndex];

@ -225,7 +264,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
            final UpdateHelper.Result translate;
            // translate update request
            try {
                translate = updateHelper.prepare(updateRequest, primary, threadPool::absoluteTimeInMillis);
                translate = updateHelper.prepare(updateRequest, primary, nowInMillis);
            } catch (Exception failure) {
                // we may fail translating an update to index or delete operation
                // we use index result to communicate failure while translating update request

@ -239,7 +278,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                    IndexRequest indexRequest = translate.action();
                    MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
                    indexRequest.process(mappingMd, request.index());
                    updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                    updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
                    break;
                case DELETED:
                    DeleteRequest deleteRequest = translate.action();

@ -300,7 +339,14 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                break; // out of retry loop
            }
        }
        return new UpdateResultHolder(replicaRequest, updateOperationResult, updateResponse);
        return new BulkItemResultHolder(updateResponse, updateOperationResult, replicaRequest);
    }

    static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) {
        final BulkItemResponse primaryResponse = request.getPrimaryResponse();
        assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request ["+ request.request() +"]";
        return primaryResponse.isFailed() == false &&
                primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP;
    }

    @Override

@ -308,9 +354,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        Translog.Location location = null;
        for (int i = 0; i < request.items().length; i++) {
            BulkItemRequest item = request.items()[i];
            assert item.getPrimaryResponse() != null : "expected primary response to be set for item [" + i + "] request ["+ item.request() +"]";
            if (item.getPrimaryResponse().isFailed() == false &&
                    item.getPrimaryResponse().getResponse().getResult() != DocWriteResponse.Result.NOOP) {
            if (shouldExecuteReplicaItem(item, i)) {
                DocWriteRequest docWriteRequest = item.request();
                DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
                final Engine.Result operationResult;

@ -352,7 +396,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        return new WriteReplicaResult<>(request, location, null, replica, logger);
    }

    private Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
    private static Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
        /* here we are moving forward in the translog with each operation. Under the hood
         * this might cross translog files which is ok since from the user perspective
         * the translog is like a tape where only the highest location needs to be fsynced

@ -391,7 +435,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
    }

    /** Utility method to prepare an index operation on primary shards */
    private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
    static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
        SourceToParse sourceToParse =
            SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(),
                request.getContentType()).routing(request.routing()).parent(request.parent());
@ -400,36 +444,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ

    /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */
    public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
            MappingUpdatedAction mappingUpdatedAction) throws Exception {
        Engine.Index operation;
        try {
            operation = prepareIndexOperationOnPrimary(request, primary);
        } catch (MapperParsingException | IllegalArgumentException e) {
            return new Engine.IndexResult(e, request.version());
            MappingUpdatePerformer mappingUpdater) throws Exception {
        MappingUpdatePerformer.MappingUpdateResult result = mappingUpdater.updateMappingsIfNeeded(primary, request);
        if (result.isFailed()) {
            return new Engine.IndexResult(result.failure, request.version());
        }
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        final ShardId shardId = primary.shardId();
        if (update != null) {
            // can throw timeout exception when updating mappings or ISE for attempting to update default mappings
            // which are bubbled up
            try {
                mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
            } catch (IllegalArgumentException e) {
                // throws IAE on conflicts merging dynamic mappings
                return new Engine.IndexResult(e, request.version());
            }
            try {
                operation = prepareIndexOperationOnPrimary(request, primary);
            } catch (MapperParsingException | IllegalArgumentException e) {
                return new Engine.IndexResult(e, request.version());
            }
            update = operation.parsedDoc().dynamicMappingsUpdate();
            if (update != null) {
                throw new ReplicationOperation.RetryOnPrimaryException(shardId,
                    "Dynamic mappings are not available on the node that holds the primary yet");
            }
        }
        return primary.index(operation);
        return primary.index(result.operation);
    }

    private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {

@ -445,4 +465,39 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
            primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
        return replica.delete(delete);
    }

    class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {

        @Nullable
        public MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception {
            Engine.Index operation;
            try {
                operation = prepareIndexOperationOnPrimary(request, primary);
            } catch (MapperParsingException | IllegalArgumentException e) {
                return new MappingUpdateResult(e);
            }
            final Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
            final ShardId shardId = primary.shardId();
            if (update != null) {
                // can throw timeout exception when updating mappings or ISE for attempting to update default mappings
                // which are bubbled up
                try {
                    mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
                } catch (IllegalArgumentException e) {
                    // throws IAE on conflicts merging dynamic mappings
                    return new MappingUpdateResult(e);
                }
                try {
                    operation = prepareIndexOperationOnPrimary(request, primary);
                } catch (MapperParsingException | IllegalArgumentException e) {
                    return new MappingUpdateResult(e);
                }
                if (operation.parsedDoc().dynamicMappingsUpdate() != null) {
                    throw new ReplicationOperation.RetryOnPrimaryException(shardId,
                        "Dynamic mappings are not available on the node that holds the primary yet");
                }
            }
            return new MappingUpdateResult(operation);
        }
    }
}
@ -111,4 +111,16 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest<DeleteByQu
        assert getSearchRequest() != null;
        return getSearchRequest().indicesOptions();
    }

    public String[] types() {
        assert getSearchRequest() != null;
        return getSearchRequest().types();
    }

    public DeleteByQueryRequest types(String... types) {
        assert getSearchRequest() != null;
        getSearchRequest().types(types);
        return this;
    }

}
@ -62,7 +62,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ -233,48 +232,20 @@ public class SearchPhaseController extends AbstractComponent {
        if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
            CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
            final Sort sort = new Sort(firstTopDocs.fields);

            final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
                    sort.getSort(), new Object[0], Float.NaN);
                Arrays.fill(shardTopDocs, empty);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs;
            }
            fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
                sort.getSort(), new Object[0], Float.NaN));
            mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
        } else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
            TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
            final Sort sort = new Sort(firstTopDocs.fields);

            final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
                Arrays.fill(shardTopDocs, empty);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs;
            }
            mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
            fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
            mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
        } else {
            final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = topDocs;
            }
            mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
            fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
            mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
        }

        ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;

@ -314,6 +285,20 @@ public class SearchPhaseController extends AbstractComponent {
        return scoreDocs;
    }

    static <T extends TopDocs> void fillTopDocs(T[] shardTopDocs,
                                                List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results,
                                                T emptyTopDocs) {
        if (results.size() != shardTopDocs.length) {
            // TopDocs#merge can't deal with null shard TopDocs
            Arrays.fill(shardTopDocs, emptyTopDocs);
        }
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> resultProvider : results) {
            final T topDocs = (T) resultProvider.value.queryResult().topDocs();
            assert topDocs != null : "top docs must not be null in a valid result";
            // the 'index' field is the position in the resultsArr atomic array
            shardTopDocs[resultProvider.index] = topDocs;
        }
    }

    public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
                                                ScoreDoc[] sortedScoreDocs, int numShards) {
        ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
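fillTopDocs above factors out the repeated pattern of pre-filling the per-shard array with an empty sentinel (because the merge step cannot handle null slots) and then overwriting the slots that actually returned results. Stripped of the Lucene and Elasticsearch types, the shape of that helper is roughly the following sketch, where the map entries are stand-ins for AtomicArray.Entry:

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

class FillByIndex {
    // entries carry the slot they belong to, mirroring the 'index' field of AtomicArray.Entry
    static <T> void fill(T[] slots, List<Map.Entry<Integer, T>> results, T empty) {
        if (results.size() != slots.length) {
            Arrays.fill(slots, empty);                     // merge code cannot handle null slots
        }
        for (Map.Entry<Integer, T> entry : results) {
            slots[entry.getKey()] = entry.getValue();      // overwrite the slots that did respond
        }
    }

    public static void main(String[] args) {
        String[] slots = new String[3];
        List<Map.Entry<Integer, String>> results =
            List.of(new AbstractMap.SimpleEntry<>(1, "shard-1 docs"));
        fill(slots, results, "<empty>");
        System.out.println(Arrays.toString(slots));        // [<empty>, shard-1 docs, <empty>]
    }
}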
@ -46,19 +46,18 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear

    @Inject
    public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                                      ClusterService clusterService, TransportSearchAction searchAction,
                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new);
                                      ClusterService clusterService, TransportSearchAction searchAction,
                                      ActionFilters actionFilters, IndexNameExpressionResolver resolver) {
        super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, resolver, MultiSearchRequest::new);
        this.clusterService = clusterService;
        this.searchAction = searchAction;
        this.availableProcessors = EsExecutors.numberOfProcessors(settings);
    }

    // For testing only:
    TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService,
                               ClusterService clusterService, TransportAction<SearchRequest, SearchResponse> searchAction,
                               IndexNameExpressionResolver indexNameExpressionResolver, int availableProcessors) {
        super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new);
                               IndexNameExpressionResolver resolver, int availableProcessors) {
        super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, resolver, MultiSearchRequest::new);
        this.clusterService = clusterService;
        this.searchAction = searchAction;
        this.availableProcessors = availableProcessors;

@ -90,10 +89,9 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
    }

    /*
     * This is not perfect and makes a big assumption, that all nodes have the same thread pool size / have the number
     * of processors and that shard of the indices the search requests go to are more or less evenly distributed across
     * all nodes in the cluster. But I think it is a good enough default for most cases, if not then the default should be
     * overwritten in the request itself.
     * This is not perfect and makes a big assumption, that all nodes have the same thread pool size / have the number of processors and
     * that shard of the indices the search requests go to are more or less evenly distributed across all nodes in the cluster. But I think
     * it is a good enough default for most cases, if not then the default should be overwritten in the request itself.
     */
    static int defaultMaxConcurrentSearches(int availableProcessors, ClusterState state) {
        int numDateNodes = state.getNodes().getDataNodes().size();

@ -103,33 +101,70 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
        return Math.max(1, numDateNodes * defaultSearchThreadPoolSize);
    }

    void executeSearch(Queue<SearchRequestSlot> requests, AtomicArray<MultiSearchResponse.Item> responses,
                       AtomicInteger responseCounter, ActionListener<MultiSearchResponse> listener) {
    /**
     * Executes a single request from the queue of requests. When a request finishes, another request is taken from the queue. When a
     * request is executed, a permit is taken on the specified semaphore, and released as each request completes.
     *
     * @param requests the queue of multi-search requests to execute
     * @param responses atomic array to hold the responses corresponding to each search request slot
     * @param responseCounter incremented on each response
     * @param listener the listener attached to the multi-search request
     */
    private void executeSearch(
            final Queue<SearchRequestSlot> requests,
            final AtomicArray<MultiSearchResponse.Item> responses,
            final AtomicInteger responseCounter,
            final ActionListener<MultiSearchResponse> listener) {
        SearchRequestSlot request = requests.poll();
        if (request == null) {
            // Ok... so there're no more requests then this is ok, we're then waiting for running requests to complete
            /*
             * The number of times that we poll an item from the queue here is the minimum of the number of requests and the maximum number
             * of concurrent requests. At first glance, it appears that we should never poll from the queue and not obtain a request given
             * that we only poll here no more times than the number of requests. However, this is not the only consumer of this queue as
             * earlier requests that have already completed will poll from the queue too and they could complete before later polls are
             * invoked here. Thus, it can be the case that we poll here and the queue was empty.
             */
            return;
        }

        /*
         * With a request in hand, we are now prepared to execute the search request. There are two possibilities, either we go asynchronous
         * or we do not (this can happen if the request does not resolve to any shards). If we do not go asynchronous, we are going to come
         * back on the same thread that attempted to execute the search request. At this point, or any other point where we come back on the
         * same thread as when the request was submitted, we should not recurse lest we might descend into a stack overflow. To avoid this,
         * when we handle the response rather than going recursive, we fork to another thread, otherwise we recurse.
         */
        final Thread thread = Thread.currentThread();
        searchAction.execute(request.request, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse searchResponse) {
                responses.set(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null));
                handleResponse();
            public void onResponse(final SearchResponse searchResponse) {
                handleResponse(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null));
            }

            @Override
            public void onFailure(Exception e) {
                responses.set(request.responseSlot, new MultiSearchResponse.Item(null, e));
                handleResponse();
            public void onFailure(final Exception e) {
                handleResponse(request.responseSlot, new MultiSearchResponse.Item(null, e));
            }

            private void handleResponse() {
            private void handleResponse(final int responseSlot, final MultiSearchResponse.Item item) {
                responses.set(responseSlot, item);
                if (responseCounter.decrementAndGet() == 0) {
                    listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()])));
                    assert requests.isEmpty();
                    finish();
                } else {
                    executeSearch(requests, responses, responseCounter, listener);
                    if (thread == Thread.currentThread()) {
                        // we are on the same thread, we need to fork to another thread to avoid recursive stack overflow on a single thread
                        threadPool.generic().execute(() -> executeSearch(requests, responses, responseCounter, listener));
                    } else {
                        // we are on a different thread (we went asynchronous), it's safe to recurse
                        executeSearch(requests, responses, responseCounter, listener);
                    }
                }
            }

            private void finish() {
                listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()])));
            }
        });
    }

@ -142,5 +177,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
            this.request = request;
            this.responseSlot = responseSlot;
        }

    }

}
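The listener above avoids unbounded recursion: when a search completes on the very thread that submitted it (the synchronous path), the next item is dispatched on another thread instead of recursing. A compact, JDK-only sketch of that guard, with a plain executor in place of the generic thread pool and placeholder work items:

import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

class DrainWithoutRecursion {
    static final ExecutorService generic = Executors.newFixedThreadPool(2);

    // an "action" takes a completion callback; it may invoke it inline or from another thread
    static void executeNext(Queue<Consumer<Runnable>> actions) {
        Consumer<Runnable> action = actions.poll();
        if (action == null) {
            return; // earlier completions may already have drained the queue
        }
        final Thread submitting = Thread.currentThread();
        action.accept(() -> {
            if (Thread.currentThread() == submitting) {
                // completed inline on the submitting thread: fork before taking the next item
                generic.execute(() -> executeNext(actions));
            } else {
                // completed asynchronously: recursing here cannot grow the submitting thread's stack
                executeNext(actions);
            }
        });
    }
}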
@ -172,7 +172,7 @@ public class ClusterModule extends AbstractModule {
        addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings));
        addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings));
        addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new SameShardAllocationDecider(settings));
        addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
        addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));
@ -23,7 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

/**

@ -46,14 +48,23 @@ public class SameShardAllocationDecider extends AllocationDecider {
    public static final String NAME = "same_shard";

    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING =
        Setting.boolSetting("cluster.routing.allocation.same_shard.host", false, Setting.Property.NodeScope);
        Setting.boolSetting("cluster.routing.allocation.same_shard.host", false, Property.Dynamic, Property.NodeScope);

    private final boolean sameHost;
    private volatile boolean sameHost;

    public SameShardAllocationDecider(Settings settings) {
    public SameShardAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
        super(settings);

        this.sameHost = CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, this::setSameHost);
    }

    /**
     * Sets the same host setting. {@code true} if allocating the same shard copy to the same host
     * should not be allowed, even when multiple nodes are being run on the same host. {@code false}
     * otherwise.
     */
    private void setSameHost(boolean sameHost) {
        this.sameHost = sameHost;
    }

    @Override
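Making cluster.routing.allocation.same_shard.host dynamic follows the usual pattern: the cached field becomes volatile and a settings-update consumer keeps it in sync. A dependency-free sketch of that wiring, where SettingsRegistry is an invented stand-in for ClusterSettings:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class DynamicFlagDemo {
    // stand-in for ClusterSettings: remembers consumers and replays new values to them
    static class SettingsRegistry {
        private final List<Consumer<Boolean>> consumers = new ArrayList<>();
        void addSettingsUpdateConsumer(Consumer<Boolean> consumer) { consumers.add(consumer); }
        void apply(boolean newValue) { consumers.forEach(c -> c.accept(newValue)); }
    }

    static class Decider {
        private volatile boolean sameHost;        // volatile: read from allocation threads

        Decider(boolean initial, SettingsRegistry registry) {
            this.sameHost = initial;
            registry.addSettingsUpdateConsumer(this::setSameHost);
        }

        private void setSameHost(boolean sameHost) { this.sameHost = sameHost; }
        boolean sameHost() { return sameHost; }
    }

    public static void main(String[] args) {
        SettingsRegistry registry = new SettingsRegistry();
        Decider decider = new Decider(false, registry);
        registry.apply(true);                     // simulates a settings update arriving later
        System.out.println(decider.sameHost());   // true
    }
}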
@ -21,13 +21,33 @@ package org.elasticsearch.common.logging;
|
|||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.SuppressLoggerChecks;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
|
||||
import java.time.ZoneId;
|
||||
import java.time.ZonedDateTime;
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.time.format.DateTimeFormatterBuilder;
|
||||
import java.time.format.SignStyle;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static java.time.temporal.ChronoField.DAY_OF_MONTH;
|
||||
import static java.time.temporal.ChronoField.DAY_OF_WEEK;
|
||||
import static java.time.temporal.ChronoField.HOUR_OF_DAY;
|
||||
import static java.time.temporal.ChronoField.MINUTE_OF_HOUR;
|
||||
import static java.time.temporal.ChronoField.MONTH_OF_YEAR;
|
||||
import static java.time.temporal.ChronoField.SECOND_OF_MINUTE;
|
||||
import static java.time.temporal.ChronoField.YEAR;
|
||||
|
||||
/**
|
||||
* A logger that logs deprecation notices.
|
||||
|
@ -36,14 +56,6 @@ public class DeprecationLogger {
|
|||
|
||||
private final Logger logger;
|
||||
|
||||
/**
|
||||
* The "Warning" Header comes from RFC-7234. As the RFC describes, it's generally used for caching purposes, but it can be
|
||||
* used for <em>any</em> warning.
|
||||
*
|
||||
* https://tools.ietf.org/html/rfc7234#section-5.5
|
||||
*/
|
||||
public static final String WARNING_HEADER = "Warning";
|
||||
|
||||
/**
|
||||
* This is set once by the {@code Node} constructor, but it uses {@link CopyOnWriteArraySet} to ensure that tests can run in parallel.
|
||||
* <p>
|
||||
|
@ -112,6 +124,114 @@ public class DeprecationLogger {
|
|||
deprecated(THREAD_CONTEXT, msg, params);
|
||||
}
|
||||
|
||||
/*
|
||||
* RFC7234 specifies the warning format as warn-code <space> warn-agent <space> "warn-text" [<space> "warn-date"]. Here, warn-code is a
|
||||
* three-digit number with various standard warn codes specified. The warn code 299 is apt for our purposes as it represents a
|
||||
* miscellaneous persistent warning (can be presented to a human, or logged, and must not be removed by a cache). The warn-agent is an
|
||||
* arbitrary token; here we use the Elasticsearch version and build hash. The warn text must be quoted. The warn-date is an optional
|
||||
* quoted field that can be in a variety of specified date formats; here we use RFC 1123 format.
|
||||
*/
|
||||
private static final String WARNING_FORMAT =
|
||||
String.format(
|
||||
Locale.ROOT,
|
||||
"299 Elasticsearch-%s%s-%s ",
|
||||
Version.CURRENT.toString(),
|
||||
Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "",
|
||||
Build.CURRENT.shortHash()) +
|
||||
"\"%s\" \"%s\"";
|
||||
|
||||
/*
|
||||
* RFC 7234 section 5.5 specifies that the warn-date is a quoted HTTP-date. HTTP-date is defined in RFC 7234 Appendix B as being from
|
||||
* RFC 7231 section 7.1.1.1. RFC 7231 specifies an HTTP-date as an IMF-fixdate (or an obs-date referring to obsolete formats). The
|
||||
* grammar for IMF-fixdate is specified as 'day-name "," SP date1 SP time-of-day SP GMT'. Here, day-name is
|
||||
* (Mon|Tue|Wed|Thu|Fri|Sat|Sun). Then, date1 is 'day SP month SP year' where day is 2DIGIT, month is
|
||||
* (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec), and year is 4DIGIT. Lastly, time-of-day is 'hour ":" minute ":" second' where
|
||||
* hour is 2DIGIT, minute is 2DIGIT, and second is 2DIGIT. Finally, 2DIGIT and 4DIGIT have the obvious definitions.
|
||||
*/
|
||||
private static final DateTimeFormatter RFC_7231_DATE_TIME;
|
||||
|
||||
static {
|
||||
final Map<Long, String> dow = new HashMap<>();
|
||||
dow.put(1L, "Mon");
|
||||
dow.put(2L, "Tue");
|
||||
dow.put(3L, "Wed");
|
||||
dow.put(4L, "Thu");
|
||||
dow.put(5L, "Fri");
|
||||
dow.put(6L, "Sat");
|
||||
dow.put(7L, "Sun");
|
||||
final Map<Long, String> moy = new HashMap<>();
|
||||
moy.put(1L, "Jan");
|
||||
moy.put(2L, "Feb");
|
||||
moy.put(3L, "Mar");
|
||||
moy.put(4L, "Apr");
|
||||
moy.put(5L, "May");
|
||||
moy.put(6L, "Jun");
|
||||
moy.put(7L, "Jul");
|
||||
moy.put(8L, "Aug");
|
||||
moy.put(9L, "Sep");
|
||||
moy.put(10L, "Oct");
|
||||
moy.put(11L, "Nov");
|
||||
moy.put(12L, "Dec");
|
||||
RFC_7231_DATE_TIME = new DateTimeFormatterBuilder()
|
||||
.parseCaseInsensitive()
|
||||
.parseLenient()
|
||||
.optionalStart()
|
||||
.appendText(DAY_OF_WEEK, dow)
|
||||
.appendLiteral(", ")
|
||||
.optionalEnd()
|
||||
.appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE)
|
||||
.appendLiteral(' ')
|
||||
.appendText(MONTH_OF_YEAR, moy)
|
||||
.appendLiteral(' ')
|
||||
                .appendValue(YEAR, 4)
                .appendLiteral(' ')
                .appendValue(HOUR_OF_DAY, 2)
                .appendLiteral(':')
                .appendValue(MINUTE_OF_HOUR, 2)
                .optionalStart()
                .appendLiteral(':')
                .appendValue(SECOND_OF_MINUTE, 2)
                .optionalEnd()
                .appendLiteral(' ')
                .appendOffset("+HHMM", "GMT")
                .toFormatter(Locale.getDefault(Locale.Category.FORMAT));
    }

    private static final ZoneId GMT = ZoneId.of("GMT");

    /**
     * Regular expression to test if a string matches the RFC7234 specification for warning headers. This pattern assumes that the warn code
     * is always 299. Further, this pattern assumes that the warn agent represents a version of Elasticsearch including the build hash.
     */
    public static Pattern WARNING_HEADER_PATTERN = Pattern.compile(
            "299 " + // warn code
            "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent
            "\"((?:\t| |!|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x80-\\xff]|\\\\|\\\\\")*)\" " + // quoted warning value, captured
            // quoted RFC 1123 date format
            "\"" + // opening quote
            "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday
            "\\d{2} " + // 2-digit day
            "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month
            "\\d{4} " + // 4-digit year
            "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second)
            "GMT" + // GMT
            "\""); // closing quote

    /**
     * Extracts the warning value from the value of a warning header that is formatted according to RFC 7234. That is, given a string
     * {@code 299 Elasticsearch-6.0.0 "warning value" "Sat, 25 Feb 2017 10:27:43 GMT"}, the return value of this method would be {@code
     * warning value}.
     *
     * @param s the value of a warning header formatted according to RFC 7234.
     * @return the extracted warning value
     */
    public static String extractWarningValueFromWarningHeader(final String s) {
        final Matcher matcher = WARNING_HEADER_PATTERN.matcher(s);
        final boolean matches = matcher.matches();
        assert matches;
        return matcher.group(1);
    }

    /**
     * Logs a deprecated message to the deprecation log, as well as to the local {@link ThreadContext}.
     *

@@ -120,16 +240,19 @@ public class DeprecationLogger {
     * @param params The parameters used to fill in the message, if any exist.
     */
    @SuppressLoggerChecks(reason = "safely delegates to logger")
    void deprecated(Set<ThreadContext> threadContexts, String message, Object... params) {
        Iterator<ThreadContext> iterator = threadContexts.iterator();
    void deprecated(final Set<ThreadContext> threadContexts, final String message, final Object... params) {
        final Iterator<ThreadContext> iterator = threadContexts.iterator();

        if (iterator.hasNext()) {
            final String formattedMessage = LoggerMessageFormat.format(message, params);

            final String warningHeaderValue = formatWarning(formattedMessage);
            assert WARNING_HEADER_PATTERN.matcher(warningHeaderValue).matches();
            assert extractWarningValueFromWarningHeader(warningHeaderValue).equals(escape(formattedMessage));
            while (iterator.hasNext()) {
                try {
                    iterator.next().addResponseHeader(WARNING_HEADER, formattedMessage);
                } catch (IllegalStateException e) {
                    final ThreadContext next = iterator.next();
                    next.addResponseHeader("Warning", warningHeaderValue, DeprecationLogger::extractWarningValueFromWarningHeader);
                } catch (final IllegalStateException e) {
                    // ignored; it should be removed shortly
                }
            }

@@ -139,4 +262,25 @@ public class DeprecationLogger {
        }
    }

    /**
     * Format a warning string in the proper warning format by prepending a warn code, warn agent, wrapping the warning string in quotes,
     * and appending the RFC 7231 date.
     *
     * @param s the warning string to format
     * @return a warning value formatted according to RFC 7234
     */
    public static String formatWarning(final String s) {
        return String.format(Locale.ROOT, WARNING_FORMAT, escape(s), RFC_7231_DATE_TIME.format(ZonedDateTime.now(GMT)));
    }

    /**
     * Escape backslashes and quotes in the specified string.
     *
     * @param s the string to escape
     * @return the escaped string
     */
    public static String escape(String s) {
        return s.replaceAll("(\\\\|\")", "\\\\$1");
    }

}

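A minimal standalone sketch of the escape behaviour that the assertions in deprecated() rely on; the class name and sample message below are invented for illustration and are not part of the change:

    public class EscapeSketch {
        // same replacement as DeprecationLogger.escape above: prefix every backslash or quote with a backslash
        static String escape(String s) {
            return s.replaceAll("(\\\\|\")", "\\\\$1");
        }

        public static void main(String[] args) {
            String message = "field \"foo\" is deprecated";
            System.out.println(escape(message)); // prints: field \"foo\" is deprecated
        }
    }
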
@@ -42,7 +42,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

@@ -377,12 +376,12 @@ public class Setting<T> extends ToXContentToBytes {
        if (exists(primary)) {
            return get(primary);
        }
        if (fallbackSetting == null) {
            return get(secondary);
        }
        if (exists(secondary)) {
            return get(secondary);
        }
        if (fallbackSetting == null) {
            return get(primary);
        }
        if (fallbackSetting.exists(primary)) {
            return fallbackSetting.get(primary);
        }

@@ -34,7 +34,9 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -257,12 +259,25 @@ public final class ThreadContext implements Closeable, Writeable {
    }

    /**
     * Add the <em>unique</em> response {@code value} for the specified {@code key}.
     * <p>
     * Any duplicate {@code value} is ignored.
     * Add the {@code value} for the specified {@code key} Any duplicate {@code value} is ignored.
     *
     * @param key the header name
     * @param value the header value
     */
    public void addResponseHeader(String key, String value) {
        threadLocal.set(threadLocal.get().putResponse(key, value));
    public void addResponseHeader(final String key, final String value) {
        addResponseHeader(key, value, v -> v);
    }

    /**
     * Add the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate
     * {@code value} after applying {@code uniqueValue} is ignored.
     *
     * @param key the header name
     * @param value the header value
     * @param uniqueValue the function that produces de-duplication values
     */
    public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) {
        threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue));
    }

    /**

@@ -396,14 +411,16 @@ public final class ThreadContext implements Closeable, Writeable {
        return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders);
    }

    private ThreadContextStruct putResponse(String key, String value) {
    private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue) {
        assert value != null;

        final Map<String, List<String>> newResponseHeaders = new HashMap<>(this.responseHeaders);
        final List<String> existingValues = newResponseHeaders.get(key);

        if (existingValues != null) {
            if (existingValues.contains(value)) {
            final Set<String> existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet());
            assert existingValues.size() == existingUniqueValues.size();
            if (existingUniqueValues.contains(uniqueValue.apply(value))) {
                return this;
            }

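A hedged sketch of the de-duplication rule that the new putResponse applies; the header values and the date-stripping function here are invented for illustration (the real caller passes DeprecationLogger::extractWarningValueFromWarningHeader as uniqueValue):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    public class DedupSketch {
        // true when the candidate collapses onto an existing value after applying uniqueValue
        static boolean isDuplicate(List<String> existingValues, String value, Function<String, String> uniqueValue) {
            Set<String> existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet());
            return existingUniqueValues.contains(uniqueValue.apply(value));
        }

        public static void main(String[] args) {
            List<String> existing = Arrays.asList("299 Elasticsearch \"shard limit\" \"Sat, 25 Feb 2017 10:27:43 GMT\"");
            String candidate = "299 Elasticsearch \"shard limit\" \"Sat, 25 Feb 2017 10:28:01 GMT\"";
            Function<String, String> uniqueValue = s -> s.replaceAll(" \"[^\"]*\"$", ""); // drop the trailing quoted date
            System.out.println(isDuplicate(existing, candidate, uniqueValue)); // true: only the dates differ
        }
    }
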
@@ -69,8 +69,9 @@ public final class MergeSchedulerConfig {
    private volatile int maxMergeCount;

    MergeSchedulerConfig(IndexSettings indexSettings) {
        setMaxThreadAndMergeCount(indexSettings.getValue(MAX_THREAD_COUNT_SETTING),
            indexSettings.getValue(MAX_MERGE_COUNT_SETTING));
        int maxThread = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
        int maxMerge = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
        setMaxThreadAndMergeCount(maxThread, maxMerge);
        this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
    }

@@ -18,6 +18,9 @@
 */
package org.elasticsearch.index.analysis;

import java.io.Reader;
import java.util.regex.Pattern;

import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;

@@ -25,10 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

import java.io.Reader;
import java.util.regex.Pattern;

public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {

    private final Pattern pattern;
    private final String replacement;

@@ -56,4 +56,9 @@ public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
    public Reader create(Reader tokenStream) {
        return new PatternReplaceCharFilter(pattern, replacement, tokenStream);
    }

    @Override
    public Object getMultiTermComponent() {
        return this;
    }
}

@@ -33,6 +33,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**

@@ -206,4 +207,11 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
    public String getWriteableName() {
        return NAME;
    }

    @Override
    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
        for (QueryBuilder query : queries) {
            InnerHitBuilder.extractInnerHits(query, innerHits);
        }
    }
}

@@ -129,7 +129,7 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder<ExistsQueryBuilder>

    public static Query newFilter(QueryShardContext context, String fieldPattern) {
        final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType =
            (FieldNamesFieldMapper.FieldNamesFieldType)context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
            (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
        if (fieldNamesFieldType == null) {
            // can only happen when no types exist, so no docs exist either
            return Queries.newMatchNoDocsQuery("Missing types in \"" + NAME + "\" query.");

@@ -144,6 +144,11 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder<ExistsQueryBuilder>
            fields = context.simpleMatchToIndexNames(fieldPattern);
        }

        if (fields.size() == 1) {
            Query filter = fieldNamesFieldType.termQuery(fields.iterator().next(), context);
            return new ConstantScoreQuery(filter);
        }

        BooleanQuery.Builder boolFilterBuilder = new BooleanQuery.Builder();
        for (String field : fields) {
            Query filter = fieldNamesFieldType.termQuery(field, context);

@@ -27,13 +27,18 @@ import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.QueryBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;

@@ -49,7 +54,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;

import java.io.IOException;
import java.util.List;

public class MatchQuery {

@@ -318,17 +322,6 @@ public class MatchQuery {

    public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
        final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
        if (query instanceof GraphQuery) {
            // we have a graph query, convert inner queries to multi phrase prefix queries
            List<Query> oldQueries = ((GraphQuery) query).getQueries();
            Query[] queries = new Query[oldQueries.size()];
            for (int i = 0; i < queries.length; i++) {
                queries[i] = toMultiPhrasePrefix(oldQueries.get(i), phraseSlop, maxExpansions);
            }

            return new GraphQuery(queries);
        }

        return toMultiPhrasePrefix(query, phraseSlop, maxExpansions);
    }

@@ -340,6 +333,9 @@ public class MatchQuery {
            boost *= bq.getBoost();
            innerQuery = bq.getQuery();
        }
        if (query instanceof SpanQuery) {
            return toSpanQueryPrefix((SpanQuery) query, boost);
        }
        final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();
        prefixQuery.setMaxExpansions(maxExpansions);
        prefixQuery.setSlop(phraseSlop);

@@ -369,6 +365,33 @@ public class MatchQuery {
        return query;
    }

    private Query toSpanQueryPrefix(SpanQuery query, float boost) {
        if (query instanceof SpanTermQuery) {
            SpanMultiTermQueryWrapper<PrefixQuery> ret =
                new SpanMultiTermQueryWrapper<>(new PrefixQuery(((SpanTermQuery) query).getTerm()));
            return boost == 1 ? ret : new BoostQuery(ret, boost);
        } else if (query instanceof SpanNearQuery) {
            SpanNearQuery spanNearQuery = (SpanNearQuery) query;
            SpanQuery[] clauses = spanNearQuery.getClauses();
            if (clauses[clauses.length-1] instanceof SpanTermQuery) {
                clauses[clauses.length-1] = new SpanMultiTermQueryWrapper<>(
                    new PrefixQuery(((SpanTermQuery) clauses[clauses.length-1]).getTerm())
                );
            }
            SpanNearQuery newQuery = new SpanNearQuery(clauses, spanNearQuery.getSlop(), spanNearQuery.isInOrder());
            return boost == 1 ? newQuery : new BoostQuery(newQuery, boost);
        } else if (query instanceof SpanOrQuery) {
            SpanOrQuery orQuery = (SpanOrQuery) query;
            SpanQuery[] clauses = new SpanQuery[orQuery.getClauses().length];
            for (int i = 0; i < clauses.length; i++) {
                clauses[i] = (SpanQuery) toSpanQueryPrefix(orQuery.getClauses()[i], 1);
            }
            return boost == 1 ? new SpanOrQuery(clauses) : new BoostQuery(new SpanOrQuery(clauses), boost);
        } else {
            return query;
        }
    }

    public Query createCommonTermsQuery(String field, String queryText, Occur highFreqOccur, Occur lowFreqOccur, float
            maxTermFrequency, MappedFieldType fieldType) {
        Query booleanQuery = createBooleanQuery(field, queryText, lowFreqOccur);

@@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PointRangeQuery;

@@ -58,6 +59,8 @@ public final class NestedHelper {
            return mightMatchNestedDocs(((TermQuery) query).getTerm().field());
        } else if (query instanceof PointRangeQuery) {
            return mightMatchNestedDocs(((PointRangeQuery) query).getField());
        } else if (query instanceof IndexOrDocValuesQuery) {
            return mightMatchNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery());
        } else if (query instanceof BooleanQuery) {
            final BooleanQuery bq = (BooleanQuery) query;
            final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);

@@ -117,6 +120,8 @@ public final class NestedHelper {
            return mightMatchNonNestedDocs(((TermQuery) query).getTerm().field(), nestedPath);
        } else if (query instanceof PointRangeQuery) {
            return mightMatchNonNestedDocs(((PointRangeQuery) query).getField(), nestedPath);
        } else if (query instanceof IndexOrDocValuesQuery) {
            return mightMatchNonNestedDocs(((IndexOrDocValuesQuery) query).getIndexQuery(), nestedPath);
        } else if (query instanceof BooleanQuery) {
            final BooleanQuery bq = (BooleanQuery) query;
            final boolean hasRequiredClauses = bq.clauses().stream().anyMatch(BooleanClause::isRequired);

@@ -668,7 +668,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
        public final long translogLocation;
        public final int size;

        Location(long generation, long translogLocation, int size) {
        public Location(long generation, long translogLocation, int size) {
            this.generation = generation;
            this.translogLocation = translogLocation;
            this.size = size;

@@ -244,15 +244,24 @@ public final class ConfigurationUtils {

    public static List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs,
                                                       Map<String, Processor.Factory> processorFactories) throws Exception {
        Exception exception = null;
        List<Processor> processors = new ArrayList<>();
        if (processorConfigs != null) {
            for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
                for (Map.Entry<String, Map<String, Object>> entry : processorConfigWithKey.entrySet()) {
                    processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue()));
                    try {
                        processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue()));
                    } catch (Exception e) {
                        exception = ExceptionsHelper.useOrSuppress(exception, e);
                    }
                }
            }
        }

        if (exception != null) {
            throw exception;
        }

        return processors;
    }

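The rewritten loop keeps reading the remaining processor configurations and throws a single exception at the end. A plausible sketch of the accumulation helper it relies on; the real ExceptionsHelper.useOrSuppress is not shown in this diff, so its behaviour is assumed here:

    public class UseOrSuppressSketch {
        // keep the first exception and attach later ones as suppressed
        static Exception useOrSuppress(Exception first, Exception second) {
            if (first == null) {
                return second;
            }
            first.addSuppressed(second);
            return first;
        }

        public static void main(String[] args) {
            Exception acc = null;
            acc = useOrSuppress(acc, new IllegalArgumentException("bad processor A"));
            acc = useOrSuppress(acc, new IllegalArgumentException("bad processor B"));
            System.out.println(acc.getSuppressed().length); // 1
        }
    }
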
@@ -164,12 +164,12 @@ public class PipelineStore extends AbstractComponent implements ClusterStateAppl

        Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2();
        Pipeline pipeline = factory.create(request.getId(), pipelineConfig, processorFactories);
        List<IllegalArgumentException> exceptions = new ArrayList<>();
        List<Exception> exceptions = new ArrayList<>();
        for (Processor processor : pipeline.flattenAllProcessors()) {
            for (Map.Entry<DiscoveryNode, IngestInfo> entry : ingestInfos.entrySet()) {
                if (entry.getValue().containsProcessor(processor.getType()) == false) {
                    String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]";
                    exceptions.add(new IllegalArgumentException(message));
                    exceptions.add(ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message));
                }
            }
        }

@@ -187,9 +187,6 @@ public class OsProbe {
        return lines.get(0);
    }

    // pattern for lines in /proc/self/cgroup
    private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:]+):(/.*)");

    // this property is to support a hack to workaround an issue with Docker containers mounting the cgroups hierarchy inconsistently with
    // respect to /proc/self/cgroup; for Docker containers this should be set to "/"
    private static final String CONTROL_GROUPS_HIERARCHY_OVERRIDE = System.getProperty("es.cgroups.hierarchy.override");

@@ -205,23 +202,29 @@ public class OsProbe {
        final List<String> lines = readProcSelfCgroup();
        final Map<String, String> controllerMap = new HashMap<>();
        for (final String line : lines) {
            final Matcher matcher = CONTROL_GROUP_PATTERN.matcher(line);
            // Matcher#matches must be invoked as matching is lazy; this can not happen in an assert as assertions might not be enabled
            final boolean matches = matcher.matches();
            assert matches : line;
            // at this point we have captured the subsystems and the control group
            final String[] controllers = matcher.group(1).split(",");
            /*
             * The virtual file /proc/self/cgroup lists the control groups that the Elasticsearch process is a member of. Each line contains
             * three colon-separated fields of the form hierarchy-ID:subsystem-list:cgroup-path. For cgroups version 1 hierarchies, the
             * subsystem-list is a comma-separated list of subsystems. The subsystem-list can be empty if the hierarchy represents a cgroups
             * version 2 hierarchy. For cgroups version 1
             */
            final String[] fields = line.split(":");
            assert fields.length == 3;
            final String[] controllers = fields[1].split(",");
            for (final String controller : controllers) {
                final String controlGroupPath;
                if (CONTROL_GROUPS_HIERARCHY_OVERRIDE != null) {
                    /*
                     * Docker violates the relationship between /proc/self/cgroup and the /sys/fs/cgroup hierarchy. It's possible that this
                     * will be fixed in future versions of Docker with cgroup namespaces, but this requires modern kernels. Thus, we provide
                     * an undocumented hack for overriding the control group path. Do not rely on this hack, it will be removed.
                     */
                    controllerMap.put(controller, CONTROL_GROUPS_HIERARCHY_OVERRIDE);
                    controlGroupPath = CONTROL_GROUPS_HIERARCHY_OVERRIDE;
                } else {
                    controllerMap.put(controller, matcher.group(2));
                    controlGroupPath = fields[2];
                }
                final String previous = controllerMap.put(controller, controlGroupPath);
                assert previous == null;
            }
        }
        return controllerMap;

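A small runnable illustration of the new split-based parsing of one /proc/self/cgroup line; the sample line is invented:

    public class CgroupLineSketch {
        public static void main(String[] args) {
            String line = "9:cpu,cpuacct:/docker/abc123"; // hierarchy-ID:subsystem-list:cgroup-path
            String[] fields = line.split(":");
            String[] controllers = fields[1].split(","); // cpu, cpuacct
            String controlGroupPath = fields[2];         // /docker/abc123
            for (String controller : controllers) {
                System.out.println(controller + " -> " + controlGroupPath);
            }
        }
    }
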
@@ -19,6 +19,7 @@

package org.elasticsearch.plugins;

import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

@@ -83,7 +84,12 @@ class RemovePluginCommand extends EnvironmentAwareCommand {

        terminal.println(VERBOSE, "Removing: " + pluginDir);
        final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
        Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
        try {
            Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
        } catch (final AtomicMoveNotSupportedException e) {
            // this can happen on a union filesystem when a plugin is not installed on the top layer; we fall back to a non-atomic move
            Files.move(pluginDir, tmpPluginDir);
        }
        pluginPaths.add(tmpPluginDir);

        IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));

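The same atomic-move-with-fallback pattern in isolation, using only java.nio; the paths below are invented for illustration:

    import java.nio.file.AtomicMoveNotSupportedException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class MoveSketch {
        public static void main(String[] args) throws Exception {
            Path source = Paths.get("/tmp/plugins/example");
            Path target = Paths.get("/tmp/plugins/.removing-example");
            try {
                Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
            } catch (AtomicMoveNotSupportedException e) {
                // e.g. on a union filesystem; fall back to a non-atomic move
                Files.move(source, target);
            }
        }
    }
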
@@ -231,27 +231,25 @@ public class RestController extends AbstractComponent implements HttpServerTrans
        if (checkRequestParameters(request, channel) == false) {
            channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(BAD_REQUEST, "error traces in responses are disabled."));
        } else {
            try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
                for (String key : headersToCopy) {
                    String httpHeader = request.header(key);
                    if (httpHeader != null) {
                        threadContext.putHeader(key, httpHeader);
                    }
            for (String key : headersToCopy) {
                String httpHeader = request.header(key);
                if (httpHeader != null) {
                    threadContext.putHeader(key, httpHeader);
                }
            }

            if (handler == null) {
                if (request.method() == RestRequest.Method.OPTIONS) {
                    // when we have OPTIONS request, simply send OK by default (with the Access Control Origin header which gets automatically added)
                if (handler == null) {
                    if (request.method() == RestRequest.Method.OPTIONS) {
                        // when we have OPTIONS request, simply send OK by default (with the Access Control Origin header which gets automatically added)

                        channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
                    } else {
                        final String msg = "No handler found for uri [" + request.uri() + "] and method [" + request.method() + "]";
                        channel.sendResponse(new BytesRestResponse(BAD_REQUEST, msg));
                    }
                channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
            } else {
                final RestHandler wrappedHandler = Objects.requireNonNull(handlerWrapper.apply(handler));
                wrappedHandler.handleRequest(request, channel, client);
                final String msg = "No handler found for uri [" + request.uri() + "] and method [" + request.method() + "]";
                channel.sendResponse(new BytesRestResponse(BAD_REQUEST, msg));
            }
                } else {
                    final RestHandler wrappedHandler = Objects.requireNonNull(handlerWrapper.apply(handler));
                    wrappedHandler.handleRequest(request, channel, client);
                }
            }
        }

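The rewritten dispatch wraps header copying and handler execution in try (ThreadContext.StoredContext ignored = threadContext.stashContext()). A generic sketch of that stash-and-restore idea using a plain ThreadLocal; everything here is invented for illustration and is not the ThreadContext implementation:

    public class StashSketch {
        static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "default");

        // swap in a fresh per-request context and return a handle that restores the previous one on close
        static AutoCloseable stashContext() {
            String previous = CONTEXT.get();
            CONTEXT.set("request-scoped");
            return () -> CONTEXT.set(previous);
        }

        public static void main(String[] args) throws Exception {
            try (AutoCloseable ignored = stashContext()) {
                System.out.println(CONTEXT.get()); // request-scoped
            }
            System.out.println(CONTEXT.get());     // default
        }
    }
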
@@ -90,6 +90,7 @@ public class RestClusterStateAction extends BaseRestHandler {
            public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception {
                builder.startObject();
                builder.field(Fields.CLUSTER_NAME, response.getClusterName().value());
                builder.byteSizeField(Fields.CLUSTER_STATE_SIZE_IN_BYTES, Fields.CLUSTER_STATE_SIZE, response.getTotalCompressedSize());
                response.getState().toXContent(builder, request);
                builder.endObject();
                return new BytesRestResponse(RestStatus.OK, builder);

@@ -118,5 +119,7 @@ public class RestClusterStateAction extends BaseRestHandler {

    static final class Fields {
        static final String CLUSTER_NAME = "cluster_name";
        static final String CLUSTER_STATE_SIZE = "compressed_size";
        static final String CLUSTER_STATE_SIZE_IN_BYTES = "compressed_size_in_bytes";
    }
}

@@ -47,7 +47,8 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio
     * @param docCount The document count in the single bucket.
     * @param aggregations The already built sub-aggregations that are associated with the bucket.
     */
    protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations,
                                              List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
        super(name, pipelineAggregators, metaData);
        this.docCount = docCount;
        this.aggregations = aggregations;

@@ -16,6 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.aggregations.bucket.filter;

import org.elasticsearch.common.io.stream.StreamInput;

@@ -28,7 +29,8 @@ import java.util.List;
import java.util.Map;

public class InternalFilter extends InternalSingleBucketAggregation implements Filter {
    InternalFilter(String name, long docCount, InternalAggregations subAggregations, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    InternalFilter(String name, long docCount, InternalAggregations subAggregations, List<PipelineAggregator> pipelineAggregators,
                   Map<String, Object> metaData) {
        super(name, docCount, subAggregations, pipelineAggregators, metaData);
    }

@@ -24,7 +24,6 @@ import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;

@@ -39,9 +38,7 @@ import java.util.Map;

/**
 * Aggregates data expressed as GeoHash longs (for efficiency's sake) but formats results as Geohash strings.
 *
 */

public class GeoHashGridAggregator extends BucketsAggregator {

    private final int requiredSize;

@@ -49,7 +46,7 @@ public class GeoHashGridAggregator extends BucketsAggregator {
    private final GeoGridAggregationBuilder.CellIdSource valuesSource;
    private final LongHash bucketOrds;

    public GeoHashGridAggregator(String name, AggregatorFactories factories, GeoGridAggregationBuilder.CellIdSource valuesSource,
    GeoHashGridAggregator(String name, AggregatorFactories factories, GeoGridAggregationBuilder.CellIdSource valuesSource,
            int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
            Map<String, Object> metaData) throws IOException {
        super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);

@@ -99,7 +96,7 @@ public class GeoHashGridAggregator extends BucketsAggregator {
        long bucketOrd;

        OrdinalBucket() {
            super(0, 0, (InternalAggregations) null);
            super(0, 0, null);
        }

    }

@@ -133,7 +130,7 @@ public class GeoHashGridAggregator extends BucketsAggregator {

    @Override
    public InternalGeoHashGrid buildEmptyAggregation() {
        return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket> emptyList(), pipelineAggregators(), metaData());
        return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData());
    }

@@ -43,7 +43,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory<
    private final int requiredSize;
    private final int shardSize;

    public GeoHashGridAggregatorFactory(String name, ValuesSourceConfig<GeoPoint> config, int precision, int requiredSize,
    GeoHashGridAggregatorFactory(String name, ValuesSourceConfig<GeoPoint> config, int precision, int requiredSize,
            int shardSize, SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
            Map<String, Object> metaData) throws IOException {
        super(name, config, context, parent, subFactoriesBuilder, metaData);

@@ -24,17 +24,14 @@ import org.elasticsearch.common.ParseField;
 * Encapsulates relevant parameter defaults and validations for the geo hash grid aggregation.
 */
final class GeoHashGridParams {
    /* default values */
    public static final int DEFAULT_PRECISION = 5;
    public static final int DEFAULT_MAX_NUM_CELLS = 10000;

    /* recognized field names in JSON */
    public static final ParseField FIELD_PRECISION = new ParseField("precision");
    public static final ParseField FIELD_SIZE = new ParseField("size");
    public static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size");
    static final ParseField FIELD_PRECISION = new ParseField("precision");
    static final ParseField FIELD_SIZE = new ParseField("size");
    static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size");

    public static int checkPrecision(int precision) {
    static int checkPrecision(int precision) {
        if ((precision < 1) || (precision > 12)) {
            throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision
                + ". Must be between 1 and 12.");

@@ -36,6 +36,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.unmodifiableList;

@@ -74,7 +75,6 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
            aggregations.writeTo(out);
        }

        @Override
        public String getKeyAsString() {
            return GeoHashUtils.stringEncode(geohashAsLong);

@@ -126,12 +126,28 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
            builder.endObject();
            return builder;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Bucket bucket = (Bucket) o;
            return geohashAsLong == bucket.geohashAsLong &&
                docCount == bucket.docCount &&
                Objects.equals(aggregations, bucket.aggregations);
        }

        @Override
        public int hashCode() {
            return Objects.hash(geohashAsLong, docCount, aggregations);
        }

    }

    private final int requiredSize;
    private final List<Bucket> buckets;

    public InternalGeoHashGrid(String name, int requiredSize, List<Bucket> buckets, List<PipelineAggregator> pipelineAggregators,
    InternalGeoHashGrid(String name, int requiredSize, List<Bucket> buckets, List<PipelineAggregator> pipelineAggregators,
            Map<String, Object> metaData) {
        super(name, pipelineAggregators, metaData);
        this.requiredSize = requiredSize;

@@ -175,7 +191,6 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal

    @Override
    public InternalGeoHashGrid doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {

        LongObjectPagedHashMap<List<Bucket>> buckets = null;
        for (InternalAggregation aggregation : aggregations) {
            InternalGeoHashGrid grid = (InternalGeoHashGrid) aggregation;

@@ -216,6 +231,23 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
        return builder;
    }

    // package protected for testing
    int getRequiredSize() {
        return requiredSize;
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(requiredSize, buckets);
    }

    @Override
    protected boolean doEquals(Object obj) {
        InternalGeoHashGrid other = (InternalGeoHashGrid) obj;
        return Objects.equals(requiredSize, other.requiredSize) &&
            Objects.equals(buckets, other.buckets);
    }

    static class BucketPriorityQueue extends PriorityQueue<Bucket> {

        BucketPriorityQueue(int size) {

@@ -224,14 +256,14 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal

        @Override
        protected boolean lessThan(Bucket o1, Bucket o2) {
            long i = o2.getDocCount() - o1.getDocCount();
            if (i == 0) {
                i = o2.compareTo(o1);
                if (i == 0) {
                    i = System.identityHashCode(o2) - System.identityHashCode(o1);
            int cmp = Long.compare(o2.getDocCount(), o1.getDocCount());
            if (cmp == 0) {
                cmp = o2.compareTo(o1);
                if (cmp == 0) {
                    cmp = System.identityHashCode(o2) - System.identityHashCode(o1);
                }
            }
            return i > 0;
            return cmp > 0;
        }
    }
}

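The comparator change above replaces long subtraction with Long.compare; subtraction can overflow and invert the ordering. A tiny demonstration with invented values:

    public class CompareSketch {
        public static void main(String[] args) {
            long a = Long.MAX_VALUE;
            long b = -1L;
            System.out.println(a - b);              // overflows to a negative value, wrongly suggesting a < b
            System.out.println(Long.compare(a, b)); // 1: a > b
        }
    }
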
@@ -41,6 +41,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;

/**
 * Implementation of {@link Histogram}.

@@ -76,6 +77,24 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
            aggregations = InternalAggregations.readAggregations(in);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != InternalDateHistogram.Bucket.class) {
                return false;
            }
            InternalDateHistogram.Bucket that = (InternalDateHistogram.Bucket) obj;
            // No need to take the keyed and format parameters into account,
            // they are already stored and tested on the InternalDateHistogram object
            return key == that.key
                && docCount == that.docCount
                && Objects.equals(aggregations, that.aggregations);
        }

        @Override
        public int hashCode() {
            return Objects.hash(getClass(), key, docCount, aggregations);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeLong(key);

@@ -169,6 +188,21 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
            out.writeOptionalWriteable(bounds);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            EmptyBucketInfo that = (EmptyBucketInfo) obj;
            return Objects.equals(rounding, that.rounding)
                && Objects.equals(bounds, that.bounds)
                && Objects.equals(subAggregations, that.subAggregations);
        }

        @Override
        public int hashCode() {
            return Objects.hash(getClass(), rounding, bounds, subAggregations);
        }
    }

    private final List<Bucket> buckets;

@@ -446,4 +480,21 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
    public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {
        return new Bucket(key.longValue(), docCount, keyed, format, aggregations);
    }

    @Override
    protected boolean doEquals(Object obj) {
        InternalDateHistogram that = (InternalDateHistogram) obj;
        return Objects.equals(buckets, that.buckets)
            && Objects.equals(order, that.order)
            && Objects.equals(format, that.format)
            && Objects.equals(keyed, that.keyed)
            && Objects.equals(minDocCount, that.minDocCount)
            && Objects.equals(offset, that.offset)
            && Objects.equals(emptyBucketInfo, that.emptyBucketInfo);
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(buckets, order, format, keyed, minDocCount, offset, emptyBucketInfo);
    }
}

@@ -28,7 +28,8 @@ import java.util.List;
import java.util.Map;

public class InternalMissing extends InternalSingleBucketAggregation implements Missing {
    InternalMissing(String name, long docCount, InternalAggregations aggregations, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    InternalMissing(String name, long docCount, InternalAggregations aggregations, List<PipelineAggregator> pipelineAggregators,
                    Map<String, Object> metaData) {
        super(name, docCount, aggregations, pipelineAggregators, metaData);
    }

@@ -31,8 +31,8 @@ import java.util.Map;
 * Result of the {@link ReverseNestedAggregator}.
 */
public class InternalReverseNested extends InternalSingleBucketAggregation implements ReverseNested {
    public InternalReverseNested(String name, long docCount, InternalAggregations aggregations, List<PipelineAggregator> pipelineAggregators,
            Map<String, Object> metaData) {
    public InternalReverseNested(String name, long docCount, InternalAggregations aggregations,
            List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
        super(name, docCount, aggregations, pipelineAggregators, metaData);
    }

@@ -16,8 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.aggregations.bucket;
package org.elasticsearch.search.aggregations.bucket.sampler;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;

@@ -34,11 +33,11 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

/**

@@ -48,13 +47,11 @@ import java.util.List;
 * {@link BestDocsDeferringCollector#createTopDocsCollector(int)} is designed to
 * be overridden and allows subclasses to choose a custom collector
 * implementation for determining the top N matches.
 *
 */

public class BestDocsDeferringCollector extends DeferringBucketCollector implements Releasable {
    final List<PerSegmentCollects> entries = new ArrayList<>();
    BucketCollector deferred;
    ObjectArray<PerParentBucketSamples> perBucketSamples;
    private final List<PerSegmentCollects> entries = new ArrayList<>();
    private BucketCollector deferred;
    private ObjectArray<PerParentBucketSamples> perBucketSamples;
    private int shardSize;
    private PerSegmentCollects perSegCollector;
    private final BigArrays bigArrays;

@@ -65,14 +62,12 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
     * @param shardSize
     *            The number of top-scoring docs to collect for each bucket
     */
    public BestDocsDeferringCollector(int shardSize, BigArrays bigArrays) {
    BestDocsDeferringCollector(int shardSize, BigArrays bigArrays) {
        this.shardSize = shardSize;
        this.bigArrays = bigArrays;
        perBucketSamples = bigArrays.newObjectArray(1);
    }

    @Override
    public boolean needsScores() {
        return true;

@@ -126,7 +121,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
    }

    private void runDeferredAggs() throws IOException {

        List<ScoreDoc> allDocs = new ArrayList<>(shardSize);
        for (int i = 0; i < perBucketSamples.size(); i++) {
            PerParentBucketSamples perBucketSample = perBucketSamples.get(i);

@@ -138,15 +132,12 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme

        // Sort the top matches by docID for the benefit of deferred collector
        ScoreDoc[] docsArr = allDocs.toArray(new ScoreDoc[allDocs.size()]);
        Arrays.sort(docsArr, new Comparator<ScoreDoc>() {
            @Override
            public int compare(ScoreDoc o1, ScoreDoc o2) {
                if(o1.doc == o2.doc){
                    return o1.shardIndex - o2.shardIndex;
                }
                return o1.doc - o2.doc;
            }
        });
        Arrays.sort(docsArr, (o1, o2) -> {
            if(o1.doc == o2.doc){
                return o1.shardIndex - o2.shardIndex;
            }
            return o1.doc - o2.doc;
        });
        try {
            for (PerSegmentCollects perSegDocs : entries) {
                perSegDocs.replayRelatedMatches(docsArr);

@@ -295,7 +286,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
        }
    }

    public int getDocCount(long parentBucket) {
        PerParentBucketSamples sampler = perBucketSamples.get((int) parentBucket);
        if (sampler == null) {

@@ -43,7 +43,7 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory<
    private final int maxDocsPerValue;
    private final String executionHint;

    public DiversifiedAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, int shardSize, int maxDocsPerValue,
    DiversifiedAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, int shardSize, int maxDocsPerValue,
            String executionHint, SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
            Map<String, Object> metaData) throws IOException {
        super(name, config, context, parent, subFactoriesBuilder, metaData);

@@ -29,7 +29,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;

@@ -48,7 +47,7 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator {
    private ValuesSource valuesSource;
    private int maxDocsPerValue;

    public DiversifiedBytesHashSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
    DiversifiedBytesHashSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
            SearchContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
            ValuesSource valuesSource,
            int maxDocsPerValue) throws IOException {

@@ -31,7 +31,6 @@ import org.elasticsearch.common.util.BytesRefHash;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;

@@ -47,7 +46,7 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator {
    private int maxDocsPerValue;
    private BytesRefHash bucketOrds;

    public DiversifiedMapSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
    DiversifiedMapSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
            SearchContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
            ValuesSource valuesSource, int maxDocsPerValue) throws IOException {
        super(name, shardSize, factories, context, parent, pipelineAggregators, metaData);

@@ -28,7 +28,6 @@ import org.apache.lucene.search.TopDocsCollector;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;

@@ -43,7 +42,7 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator {
    private ValuesSource.Numeric valuesSource;
    private int maxDocsPerValue;

    public DiversifiedNumericSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
    DiversifiedNumericSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
            SearchContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
            ValuesSource.Numeric valuesSource, int maxDocsPerValue) throws IOException {
        super(name, shardSize, factories, context, parent, pipelineAggregators, metaData);

@@ -29,7 +29,6 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey;
import org.apache.lucene.search.TopDocsCollector;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;

@@ -44,7 +43,7 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator {
    private ValuesSource.Bytes.WithOrdinals.FieldData valuesSource;
    private int maxDocsPerValue;

    public DiversifiedOrdinalsSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
    DiversifiedOrdinalsSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
            SearchContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
            ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, int maxDocsPerValue) throws IOException {
        super(name, shardSize, factories, context, parent, pipelineAggregators, metaData);

@@ -26,7 +26,6 @@ import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -53,7 +52,6 @@ public class SamplerAggregator extends SingleBucketAggregator {
    public static final ParseField MAX_DOCS_PER_VALUE_FIELD = new ParseField("max_docs_per_value");
    public static final ParseField EXECUTION_HINT_FIELD = new ParseField("execution_hint");


    public enum ExecutionMode {

        MAP(new ParseField("map")) {

@@ -141,7 +139,7 @@ public class SamplerAggregator extends SingleBucketAggregator {
    protected final int shardSize;
    protected BestDocsDeferringCollector bdd;

    public SamplerAggregator(String name, int shardSize, AggregatorFactories factories, SearchContext context,
    SamplerAggregator(String name, int shardSize, AggregatorFactories factories, SearchContext context,
            Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
        super(name, factories, context, parent, pipelineAggregators, metaData);
        this.shardSize = shardSize;

@@ -156,10 +154,8 @@ public class SamplerAggregator extends SingleBucketAggregator {
    public DeferringBucketCollector getDeferringCollector() {
        bdd = new BestDocsDeferringCollector(shardSize, context.bigArrays());
        return bdd;

    }


    @Override
    protected boolean shouldDefer(Aggregator aggregator) {
        return true;

@@ -193,4 +189,3 @@ public class SamplerAggregator extends SingleBucketAggregator {
    }

}

@@ -33,7 +33,7 @@ public class SamplerAggregatorFactory extends AggregatorFactory<SamplerAggregato

    private final int shardSize;

    public SamplerAggregatorFactory(String name, int shardSize, SearchContext context, AggregatorFactory<?> parent,
    SamplerAggregatorFactory(String name, int shardSize, SearchContext context, AggregatorFactory<?> parent,
            AggregatorFactories.Builder subFactories, Map<String, Object> metaData) throws IOException {
        super(name, context, parent, subFactories, metaData);
        this.shardSize = shardSize;

@@ -31,7 +31,7 @@ import java.util.Map;
public class UnmappedSampler extends InternalSampler {
    public static final String NAME = "unmapped_sampler";

    public UnmappedSampler(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
    UnmappedSampler(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
        super(name, 0, InternalAggregations.EMPTY, pipelineAggregators, metaData);
    }

@@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;

@@ -99,4 +100,21 @@ public abstract class InternalMappedSignificantTerms<
    protected SignificanceHeuristic getSignificanceHeuristic() {
        return significanceHeuristic;
    }

    @Override
    protected boolean doEquals(Object obj) {
        InternalMappedSignificantTerms<?, ?> that = (InternalMappedSignificantTerms<?, ?>) obj;
        return super.doEquals(obj)
            && Objects.equals(format, that.format)
            && subsetSize == that.subsetSize
            && supersetSize == that.supersetSize
            && Objects.equals(significanceHeuristic, that.significanceHeuristic)
            && Objects.equals(buckets, that.buckets)
            && Objects.equals(bucketMap, that.bucketMap);
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(super.doHashCode(), format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketMap);
    }
}
Some files were not shown because too many files have changed in this diff.