Merge remote-tracking branch 'es/7.x' into enrich-7.x

Martijn van Groningen 2019-08-13 09:12:31 +02:00
commit 1951cdf1cb
No known key found for this signature in database
GPG Key ID: AB236F4FCF2AF12A
713 changed files with 9092 additions and 5379 deletions


@@ -70,6 +70,11 @@ projectsLoaded {
maven configCache()
}
}
rootProject {
project.pluginManager.withPlugin('com.gradle.build-scan') {
buildScan.server = 'https://gradle-enterprise.elastic.co'
}
}
}
final String buildCacheUrl = System.getProperty('org.elasticsearch.build.cache.url')

Vagrantfile

@@ -41,6 +41,16 @@ Vagrant.configure(2) do |config|
# the elasticsearch project called vagrant....
config.vm.synced_folder '.', '/vagrant', disabled: true
config.vm.synced_folder '.', '/elasticsearch'
# TODO: make these syncs work for windows!!!
config.vm.synced_folder "#{Dir.home}/.vagrant/gradle/caches/jars-3", "/root/.gradle/caches/jars-3",
create: true,
owner: "vagrant"
config.vm.synced_folder "#{Dir.home}/.vagrant/gradle/caches/modules-2", "/root/.gradle/caches/modules-2",
create: true,
owner: "vagrant"
config.vm.synced_folder "#{Dir.home}/.gradle/wrapper", "/root/.gradle/wrapper",
create: true,
owner: "vagrant"
# Expose project directory. Note that VAGRANT_CWD may not be the same as Dir.pwd
PROJECT_DIR = ENV['VAGRANT_PROJECT_DIR'] || Dir.pwd
@@ -380,10 +390,6 @@ export ZIP=/elasticsearch/distribution/zip/build/distributions
export TAR=/elasticsearch/distribution/tar/build/distributions
export RPM=/elasticsearch/distribution/rpm/build/distributions
export DEB=/elasticsearch/distribution/deb/build/distributions
export BATS=/project/build/bats
export BATS_UTILS=/project/build/packaging/bats/utils
export BATS_TESTS=/project/build/packaging/bats/tests
export PACKAGING_ARCHIVES=/project/build/packaging/archives
export PACKAGING_TESTS=/project/build/packaging/tests
VARS
cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
@@ -391,11 +397,10 @@ Defaults env_keep += "ZIP"
Defaults env_keep += "TAR"
Defaults env_keep += "RPM"
Defaults env_keep += "DEB"
Defaults env_keep += "BATS"
Defaults env_keep += "BATS_UTILS"
Defaults env_keep += "BATS_TESTS"
Defaults env_keep += "PACKAGING_ARCHIVES"
Defaults env_keep += "PACKAGING_TESTS"
Defaults env_keep += "BATS_UTILS"
Defaults env_keep += "BATS_TESTS"
Defaults env_keep += "JAVA_HOME"
Defaults env_keep += "SYSTEM_JAVA_HOME"
SUDOERS_VARS


@@ -24,12 +24,15 @@ import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.BwcVersions
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.tool.Boilerplate
import org.gradle.util.GradleVersion
import org.gradle.util.DistributionLocator
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure
plugins {
id 'com.gradle.build-scan' version '2.3'
id 'com.gradle.build-scan' version '2.4'
id 'base'
id 'elasticsearch.global-build-info'
}
@@ -212,7 +215,7 @@ task branchConsistency {
allprojects {
// ignore missing javadocs
tasks.withType(Javadoc) { Javadoc javadoc ->
tasks.withType(Javadoc).configureEach { Javadoc javadoc ->
// the -quiet here is because of a bug in gradle, in that adding a string option
// by itself is not added to the options. By adding quiet, both this option and
// the "value" -quiet is added, separated by a space. This is ok since the javadoc
@@ -329,13 +332,9 @@ allprojects {
}
}
task cleanIdeaBuildDir(type: Delete) {
delete 'build-idea'
tasks.named('cleanIdea') {
delete 'build-idea'
}
cleanIdeaBuildDir.setGroup("ide")
cleanIdeaBuildDir.setDescription("Deletes the IDEA build directory.")
tasks.cleanIdea.dependsOn(cleanIdeaBuildDir)
}
idea {
@@ -390,29 +389,20 @@ allprojects {
String lineSeparator = Os.isFamily(Os.FAMILY_WINDOWS) ? '\\\\r\\\\n' : '\\\\n'
String licenseHeader = licenseHeaderFile.getText('UTF-8').replace(System.lineSeparator(), lineSeparator)
task copyEclipseSettings(type: Copy) {
tasks.register('copyEclipseSettings', Copy) {
mustRunAfter 'wipeEclipseSettings'
// TODO: "package this up" for external builds
from new File(project.rootDir, 'buildSrc/src/main/resources/eclipse.settings')
into '.settings'
filter{ it.replaceAll('@@LICENSE_HEADER_TEXT@@', licenseHeader)}
}
// otherwise .settings is not nuked entirely
task wipeEclipseSettings(type: Delete) {
tasks.register('wipeEclipseSettings', Delete) {
delete '.settings'
}
tasks.cleanEclipse.dependsOn(wipeEclipseSettings)
tasks.named('cleanEclipse') { dependsOn 'wipeEclipseSettings' }
// otherwise the eclipse merging is *super confusing*
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
// work around https://github.com/gradle/gradle/issues/6582
tasks.eclipseProject.mustRunAfter tasks.cleanEclipseProject
tasks.matching { it.name == 'eclipseClasspath' }.all {
it.mustRunAfter { tasks.cleanEclipseClasspath }
}
tasks.matching { it.name == 'eclipseJdt' }.all {
it.mustRunAfter { tasks.cleanEclipseJdt }
}
tasks.copyEclipseSettings.mustRunAfter tasks.wipeEclipseSettings
tasks.named('eclipse') { dependsOn 'cleanEclipse', 'copyEclipseSettings' }
}
allprojects {
@@ -477,13 +467,11 @@ gradle.projectsEvaluated {
* need to publish artifacts for them.
*/
if (project.name.equals('qa') || project.path.contains(':qa:')) {
Task assemble = project.tasks.findByName('assemble')
if (assemble) {
assemble.enabled = false
maybeConfigure(project.tasks, 'assemble') {
it.enabled = false
}
Task dependenciesInfo = project.tasks.findByName('dependenciesInfo')
if (dependenciesInfo) {
dependenciesInfo.enabled = false
maybeConfigure(project.tasks, 'dependenciesInfo') {
it.enabled = false
}
}
}
@@ -505,7 +493,7 @@ gradle.projectsEvaluated {
}
allprojects {
task resolveAllDependencies {
tasks.register('resolveAllDependencies') {
dependsOn tasks.matching { it.name == "pullFixture"}
doLast {
configurations.findAll { it.isCanBeResolved() }.each { it.resolve() }
@@ -535,13 +523,13 @@ allprojects {
}
}
task checkPart1
task checkPart2
tasks.matching { it.name == "check" }.all { check ->
if (check.path.startsWith(":x-pack:")) {
checkPart2.dependsOn check
} else {
checkPart1.dependsOn check
}
}
def checkPart1 = tasks.register('checkPart1')
def checkPart2 = tasks.register('checkPart2')
plugins.withId('lifecycle-base') {
if (project.path.startsWith(":x-pack:")) {
checkPart1.configure { dependsOn 'check' }
} else {
checkPart2.configure { dependsOn 'check' }
}
}
}


@@ -72,6 +72,7 @@ sourceSets {
}
allprojects {
apply plugin: 'java'
targetCompatibility = 11
sourceCompatibility = 11
}
@@ -87,9 +88,6 @@ compileMinimumRuntimeJava {
jar {
from sourceSets.minimumRuntime.output
into('META-INF') {
from configurations.reaper
}
}
javadoc {
@@ -127,7 +125,6 @@ dependencies {
testCompile "junit:junit:${props.getProperty('junit')}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
testCompile 'com.github.tomakehurst:wiremock-jre8-standalone:2.23.2'
reaper project('reaper')
minimumRuntimeCompile "junit:junit:${props.getProperty('junit')}"
minimumRuntimeCompile localGroovy()
minimumRuntimeCompile gradleApi()
@@ -143,6 +140,10 @@ if (project == rootProject) {
mavenLocal()
}
}
dependencies {
// add this to the runtime classpath so Gradle will properly track it as a build runtime classpath input
runtimeOnly project('reaper')
}
// only run tests as build-tools
test.enabled = false
}
@@ -177,9 +178,11 @@ if (project != rootProject) {
configurations {
distribution
reaper
}
dependencies {
reaper project('reaper')
distribution project(':distribution:archives:windows-zip')
distribution project(':distribution:archives:oss-windows-zip')
distribution project(':distribution:archives:darwin-tar')
@@ -191,6 +194,9 @@ if (project != rootProject) {
// for external projects we want to remove the marker file indicating we are running the Elasticsearch project
processResources {
exclude 'buildSrc.marker'
into('META-INF') {
from configurations.reaper
}
}
String localDownloads = "${rootProject.buildDir}/local-downloads"


@@ -1,6 +1,5 @@
apply plugin: 'java'
jar {
archiveName = "${project.name}.jar"
manifest {
attributes 'Main-Class': 'org.elasticsearch.gradle.reaper.Reaper'
}


@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle
import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar
import groovy.transform.CompileDynamic
@@ -66,6 +67,7 @@ import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.SourceSetContainer
import org.gradle.api.tasks.TaskProvider
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.GroovyCompile
import org.gradle.api.tasks.compile.JavaCompile
@@ -82,10 +84,11 @@ import org.gradle.process.ExecSpec
import org.gradle.util.GradleVersion
import java.nio.charset.StandardCharsets
import java.time.ZoneOffset
import java.time.ZonedDateTime
import java.nio.file.Files
import java.util.regex.Matcher
import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure
/**
* Encapsulates build configuration for elasticsearch projects.
*/
@@ -127,7 +130,7 @@ class BuildPlugin implements Plugin<Project> {
// apply global test task failure listener
project.rootProject.pluginManager.apply(TestFailureReportingPlugin)
project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask)
project.getTasks().register("buildResources", ExportElasticsearchBuildResourcesTask)
setupSeed(project)
configureRepositories(project)
@@ -154,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension)
// Common config when running with a FIPS-140 runtime JVM
if (ext.has('inFipsJvm') && ext.get('inFipsJvm')) {
project.tasks.withType(Test) { Test task ->
project.tasks.withType(Test).configureEach { Test task ->
task.systemProperty 'javax.net.ssl.trustStorePassword', 'password'
task.systemProperty 'javax.net.ssl.keyStorePassword', 'password'
}
@@ -530,7 +533,7 @@ class BuildPlugin implements Plugin<Project> {
static void configurePomGeneration(Project project) {
// Only works with `enableFeaturePreview('STABLE_PUBLISHING')`
// https://github.com/gradle/gradle/issues/5696#issuecomment-396965185
project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
project.tasks.withType(GenerateMavenPom.class).configureEach({ GenerateMavenPom generatePOMTask ->
// The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it,
// just make a copy.
ExtraPropertiesExtension ext = generatePOMTask.extensions.getByType(ExtraPropertiesExtension)
@@ -546,12 +549,15 @@ class BuildPlugin implements Plugin<Project> {
}
}
}
// build poms with assemble (if the assemble task exists)
Task assemble = project.tasks.findByName('assemble')
if (assemble && assemble.enabled) {
assemble.dependsOn(generatePOMTask)
} as Action<GenerateMavenPom>)
// build poms with assemble (if the assemble task exists)
maybeConfigure(project.tasks, 'assemble') { assemble ->
if (assemble.enabled) {
assemble.dependsOn(project.tasks.withType(GenerateMavenPom))
}
}
project.plugins.withType(MavenPublishPlugin).whenPluginAdded {
PublishingExtension publishing = project.extensions.getByType(PublishingExtension)
publishing.publications.all { MavenPublication publication -> // we only deal with maven
@@ -607,7 +613,7 @@ class BuildPlugin implements Plugin<Project> {
project.afterEvaluate {
File compilerJavaHome = ext.get('compilerJavaHome') as File
project.tasks.withType(JavaCompile) { JavaCompile compileTask ->
project.tasks.withType(JavaCompile).configureEach({ JavaCompile compileTask ->
final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(compileTask.targetCompatibility)
// we only fork if the Gradle JDK is not the same as the compiler JDK
if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) {
@@ -644,9 +650,9 @@ class BuildPlugin implements Plugin<Project> {
// TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510)
compileTask.options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion
}
} as Action<JavaCompile>)
// also apply release flag to groovy, which is used in build-tools
project.tasks.withType(GroovyCompile) { GroovyCompile compileTask ->
project.tasks.withType(GroovyCompile).configureEach({ GroovyCompile compileTask ->
// we only fork if the Gradle JDK is not the same as the compiler JDK
if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) {
compileTask.options.fork = false
@@ -655,19 +661,23 @@ class BuildPlugin implements Plugin<Project> {
compileTask.options.forkOptions.javaHome = compilerJavaHome
compileTask.options.compilerArgs << '--release' << JavaVersion.toVersion(compileTask.targetCompatibility).majorVersion
}
}
} as Action<GroovyCompile>)
}
}
static void configureJavadoc(Project project) {
// remove compiled classes from the Javadoc classpath: http://mail.openjdk.java.net/pipermail/javadoc-dev/2018-January/000400.html
final List<File> classes = new ArrayList<>()
project.tasks.withType(JavaCompile) { JavaCompile javaCompile ->
project.tasks.withType(JavaCompile).configureEach { JavaCompile javaCompile ->
classes.add(javaCompile.destinationDir)
}
project.tasks.withType(Javadoc) { Javadoc javadoc ->
project.tasks.withType(Javadoc).configureEach { Javadoc javadoc ->
File compilerJavaHome = project.extensions.getByType(ExtraPropertiesExtension).get('compilerJavaHome') as File
javadoc.executable = new File(compilerJavaHome, 'bin/javadoc')
// only explicitly set javadoc executable if compiler JDK is different from Gradle
// this ensures better cacheability as setting this input to an absolute path breaks portability
if (Files.isSameFile(compilerJavaHome.toPath(), Jvm.current().getJavaHome().toPath()) == false) {
javadoc.executable = new File(compilerJavaHome, 'bin/javadoc')
}
javadoc.classpath = javadoc.getClasspath().filter { f ->
return classes.contains(f) == false
}
@@ -682,21 +692,27 @@ class BuildPlugin implements Plugin<Project> {
/** Adds a javadocJar task to generate a jar containing javadocs. */
static void configureJavadocJar(Project project) {
Jar javadocJarTask = project.tasks.create('javadocJar', Jar)
javadocJarTask.classifier = 'javadoc'
javadocJarTask.group = 'build'
javadocJarTask.description = 'Assembles a jar containing javadocs.'
javadocJarTask.from(project.tasks.getByName(JavaPlugin.JAVADOC_TASK_NAME))
project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(javadocJarTask)
TaskProvider<Jar> javadocJarTask = project.tasks.register('javadocJar', Jar, { Jar jar ->
jar.archiveClassifier.set('javadoc')
jar.group = 'build'
jar.description = 'Assembles a jar containing javadocs.'
jar.from(project.tasks.named(JavaPlugin.JAVADOC_TASK_NAME))
} as Action<Jar>)
maybeConfigure(project.tasks, BasePlugin.ASSEMBLE_TASK_NAME) { Task t ->
t.dependsOn(javadocJarTask)
}
}
static void configureSourcesJar(Project project) {
Jar sourcesJarTask = project.tasks.create('sourcesJar', Jar)
sourcesJarTask.classifier = 'sources'
sourcesJarTask.group = 'build'
sourcesJarTask.description = 'Assembles a jar containing source files.'
sourcesJarTask.from(project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).allSource)
project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(sourcesJarTask)
TaskProvider<Jar> sourcesJarTask = project.tasks.register('sourcesJar', Jar, { Jar jar ->
jar.archiveClassifier.set('sources')
jar.group = 'build'
jar.description = 'Assembles a jar containing source files.'
jar.from(project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).allSource)
} as Action<Jar>)
maybeConfigure(project.tasks, BasePlugin.ASSEMBLE_TASK_NAME) { Task t ->
t.dependsOn(sourcesJarTask)
}
}
/** Adds additional manifest info to jars */
@@ -704,7 +720,7 @@ class BuildPlugin implements Plugin<Project> {
ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension)
ext.set('licenseFile', null)
ext.set('noticeFile', null)
project.tasks.withType(Jar) { Jar jarTask ->
project.tasks.withType(Jar).configureEach { Jar jarTask ->
// we put all our distributable files under distributions
jarTask.destinationDir = new File(project.buildDir, 'distributions')
// fixup the jar manifest
@@ -720,9 +736,10 @@ class BuildPlugin implements Plugin<Project> {
'Build-Date': ext.get('buildDate'),
'Build-Java-Version': compilerJavaVersion)
}
// add license/notice files
project.afterEvaluate {
}
// add license/notice files
project.afterEvaluate {
project.tasks.withType(Jar).configureEach { Jar jarTask ->
if (ext.has('licenseFile') == false || ext.get('licenseFile') == null || ext.has('noticeFile') == false || ext.get('noticeFile') == null) {
throw new GradleException("Must specify license and notice file for project ${project.path}")
}
@@ -748,8 +765,8 @@ class BuildPlugin implements Plugin<Project> {
* normal jar with the shadow jar so we no longer want to run
* the jar task.
*/
project.tasks.getByName(JavaPlugin.JAR_TASK_NAME).enabled = false
project.tasks.getByName('shadowJar').configure { ShadowJar shadowJar ->
project.tasks.named(JavaPlugin.JAR_TASK_NAME).configure { it.enabled = false }
project.tasks.named('shadowJar').configure { ShadowJar shadowJar ->
/*
* Replace the default "shadow" classifier with null
* which will leave the classifier off of the file name.
@@ -766,7 +783,9 @@ class BuildPlugin implements Plugin<Project> {
shadowJar.configurations = [project.configurations.getByName('bundle')]
}
// Make sure we assemble the shadow jar
project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn project.tasks.getByName('shadowJar')
project.tasks.named(BasePlugin.ASSEMBLE_TASK_NAME).configure {
it.dependsOn project.tasks.named('shadowJar')
}
project.artifacts.add('apiElements', project.tasks.getByName('shadowJar'))
}
}
@@ -775,7 +794,7 @@ class BuildPlugin implements Plugin<Project> {
ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension)
// Default test task should run only unit tests
project.tasks.withType(Test).matching { Test task -> task.name == 'test' }.all { Test task ->
maybeConfigure(project.tasks, 'test', Test) { Test task ->
task.include '**/*Tests.class'
}
@@ -783,7 +802,7 @@ class BuildPlugin implements Plugin<Project> {
if (project.path != ':build-tools') {
File heapdumpDir = new File(project.buildDir, 'heapdump')
project.tasks.withType(Test) { Test test ->
project.tasks.withType(Test).configureEach { Test test ->
File testOutputDir = new File(test.reports.junitXml.getDestination(), "output")
ErrorReportingTestListener listener = new ErrorReportingTestListener(test.testLogging, testOutputDir)
@@ -894,30 +913,37 @@ class BuildPlugin implements Plugin<Project> {
}
private static configurePrecommit(Project project) {
Task precommit = PrecommitTasks.create(project, true)
project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(precommit)
project.tasks.getByName(JavaPlugin.TEST_TASK_NAME).mustRunAfter(precommit)
TaskProvider precommit = PrecommitTasks.create(project, true)
project.tasks.named(LifecycleBasePlugin.CHECK_TASK_NAME).configure { it.dependsOn(precommit) }
project.tasks.named(JavaPlugin.TEST_TASK_NAME).configure { it.mustRunAfter(precommit) }
// only require dependency licenses for non-elasticsearch deps
(project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME).fileCollection { Dependency dependency ->
dependency.group.startsWith('org.elasticsearch') == false
} - project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)
project.plugins.withType(ShadowPlugin).whenPluginAdded {
(project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies += project.configurations.getByName('bundle').fileCollection { Dependency dependency ->
project.tasks.withType(DependencyLicensesTask).named('dependencyLicenses').configure {
it.dependencies = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME).fileCollection { Dependency dependency ->
dependency.group.startsWith('org.elasticsearch') == false
} - project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)
}
project.plugins.withType(ShadowPlugin).whenPluginAdded {
project.tasks.withType(DependencyLicensesTask).named('dependencyLicenses').configure {
it.dependencies += project.configurations.getByName('bundle').fileCollection { Dependency dependency ->
dependency.group.startsWith('org.elasticsearch') == false
}
}
}
}
private static configureDependenciesInfo(Project project) {
DependenciesInfoTask deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask)
deps.runtimeConfiguration = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME)
TaskProvider<DependenciesInfoTask> deps = project.tasks.register("dependenciesInfo", DependenciesInfoTask, { DependenciesInfoTask task ->
task.runtimeConfiguration = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME)
task.compileOnlyConfiguration = project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)
task.getConventionMapping().map('mappings') {
(project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).mappings
}
} as Action<DependenciesInfoTask>)
project.plugins.withType(ShadowPlugin).whenPluginAdded {
deps.runtimeConfiguration = project.configurations.create('infoDeps')
deps.runtimeConfiguration.extendsFrom(project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME), project.configurations.getByName('bundle'))
}
deps.compileOnlyConfiguration = project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)
project.afterEvaluate {
deps.mappings = (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).mappings
deps.configure { task ->
task.runtimeConfiguration = project.configurations.create('infoDeps')
task.runtimeConfiguration.extendsFrom(project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME), project.configurations.getByName('bundle'))
}
}
}


@@ -25,6 +25,7 @@ import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.Dependency
import org.gradle.api.artifacts.DependencyResolutionListener
import org.gradle.api.artifacts.DependencySet
import org.gradle.api.internal.ConventionTask
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.OutputFile
@@ -45,7 +46,7 @@ import java.util.regex.Pattern
* </ul>
*
*/
public class DependenciesInfoTask extends DefaultTask {
public class DependenciesInfoTask extends ConventionTask {
/** Dependencies to gather information from. */
@Input
@@ -55,8 +56,7 @@ public class DependenciesInfoTask extends DefaultTask {
@Input
public Configuration compileOnlyConfiguration
@Input
public LinkedHashMap<String, String> mappings
private LinkedHashMap<String, String> mappings
/** Directory to read license files */
@InputDirectory
@@ -93,7 +93,7 @@
}
final String url = createURL(dependency.group, dependency.name, dependency.version)
final String dependencyName = DependencyLicensesTask.getDependencyName(mappings, dependency.name)
final String dependencyName = DependencyLicensesTask.getDependencyName(getMappings(), dependency.name)
logger.info("mapped dependency ${dependency.group}:${dependency.name} to ${dependencyName} for license info")
final String licenseType = getLicenseType(dependency.group, dependencyName)
@@ -103,7 +103,15 @@
outputFile.setText(output.toString(), 'UTF-8')
}
/**
@Input
LinkedHashMap<String, String> getMappings() {
return mappings
}
void setMappings(LinkedHashMap<String, String> mappings) {
this.mappings = mappings
}
/**
* Create an URL on <a href="https://repo1.maven.org/maven2/">Maven Central</a>
* based on dependency coordinates.
*/


@@ -32,6 +32,7 @@ import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.plugins.BasePlugin
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
@@ -112,7 +113,7 @@ class PluginBuildPlugin implements Plugin<Project> {
addNoticeGeneration(project, extension)
}
}
project.testingConventions {
project.tasks.named('testingConventions').configure {
naming.clear()
naming {
Tests {
@@ -175,7 +176,7 @@ class PluginBuildPlugin implements Plugin<Project> {
/** Adds an integTest task which runs rest tests */
private static void createIntegTestTask(Project project) {
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.mustRunAfter(project.precommit, project.test)
integTest.mustRunAfter('precommit', 'test')
if (project.plugins.hasPlugin(TestClustersPlugin.class) == false) {
// only if not using test clusters
project.integTestCluster.distribution = System.getProperty('tests.distribution', 'integ-test-zip')
@@ -259,7 +260,9 @@ class PluginBuildPlugin implements Plugin<Project> {
include 'bin/**'
}
}
project.assemble.dependsOn(bundle)
project.tasks.named(BasePlugin.ASSEMBLE_TASK_NAME).configure {
dependsOn(bundle)
}
// also make the zip available as a configuration (used when depending on this project)
project.configurations.create('zip')


@@ -26,11 +26,10 @@ import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.tool.ClasspathUtils
import org.gradle.api.JavaVersion
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Configuration
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.plugins.JavaPluginConvention
import org.gradle.api.plugins.quality.Checkstyle
import org.gradle.api.tasks.TaskProvider
/**
* Validation tasks which should be run before committing. These run before tests.
@@ -41,7 +40,7 @@ class PrecommitTasks {
public static final String CHECKSTYLE_VERSION = '8.20'
public static Task create(Project project, boolean includeDependencyLicenses) {
public static TaskProvider create(Project project, boolean includeDependencyLicenses) {
project.configurations.create("forbiddenApisCliJar")
project.dependencies {
forbiddenApisCliJar('de.thetaphi:forbiddenapis:2.6')
@@ -57,12 +56,12 @@
}
}
List<Task> precommitTasks = [
List<TaskProvider> precommitTasks = [
configureCheckstyle(project),
configureForbiddenApisCli(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('filepermissions', FilePermissionsTask.class),
project.tasks.register('forbiddenPatterns', ForbiddenPatternsTask),
project.tasks.register('licenseHeaders', LicenseHeadersTask),
project.tasks.register('filepermissions', FilePermissionsTask),
configureJarHell(project, jarHellConfig),
configureThirdPartyAudit(project),
configureTestingConventions(project)
@@ -71,11 +70,12 @@ class PrecommitTasks {
// tasks with just tests don't need dependency licenses, so this flag makes adding
// the task optional
if (includeDependencyLicenses) {
DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class)
TaskProvider<DependencyLicensesTask> dependencyLicenses = project.tasks.register('dependencyLicenses', DependencyLicensesTask)
precommitTasks.add(dependencyLicenses)
// we also create the updateShas helper task that is associated with dependencyLicenses
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
updateShas.parentTask = dependencyLicenses
project.tasks.register('updateShas', UpdateShasTask) {
it.parentTask = dependencyLicenses
}
}
if (project.path != ':build-tools') {
/*
@@ -93,35 +93,36 @@ class PrecommitTasks {
// We want to get any compilation error before running the pre-commit checks.
project.sourceSets.all { sourceSet ->
precommitTasks.each { task ->
task.shouldRunAfter(sourceSet.getClassesTaskName())
precommitTasks.each { provider ->
provider.configure {
shouldRunAfter(sourceSet.getClassesTaskName())
}
}
}
return project.tasks.create([
name : 'precommit',
group : JavaBasePlugin.VERIFICATION_GROUP,
description: 'Runs all non-test checks.',
dependsOn : precommitTasks
])
return project.tasks.register('precommit') {
group = JavaBasePlugin.VERIFICATION_GROUP
description = 'Runs all non-test checks.'
dependsOn = precommitTasks
}
}
static Task configureTestingConventions(Project project) {
TestingConventionsTasks task = project.getTasks().create("testingConventions", TestingConventionsTasks.class)
task.naming {
Tests {
baseClass "org.apache.lucene.util.LuceneTestCase"
}
IT {
baseClass "org.elasticsearch.test.ESIntegTestCase"
baseClass 'org.elasticsearch.test.rest.ESRestTestCase'
static TaskProvider configureTestingConventions(Project project) {
return project.getTasks().register("testingConventions", TestingConventionsTasks) {
naming {
Tests {
baseClass "org.apache.lucene.util.LuceneTestCase"
}
IT {
baseClass "org.elasticsearch.test.ESIntegTestCase"
baseClass 'org.elasticsearch.test.rest.ESRestTestCase'
}
}
}
return task
}
private static Task configureJarHell(Project project, Configuration jarHelConfig) {
return project.tasks.create('jarHell', JarHellTask) { task ->
private static TaskProvider configureJarHell(Project project, Configuration jarHelConfig) {
return project.tasks.register('jarHell', JarHellTask) { task ->
task.classpath = project.sourceSets.test.runtimeClasspath + jarHelConfig;
if (project.plugins.hasPlugin(ShadowPlugin)) {
task.classpath += project.configurations.bundle
@@ -130,9 +131,9 @@ class PrecommitTasks {
}
}
private static Task configureThirdPartyAudit(Project project) {
private static TaskProvider configureThirdPartyAudit(Project project) {
ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')
return project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) { task ->
return project.tasks.register('thirdPartyAudit', ThirdPartyAuditTask) { task ->
task.dependsOn(buildResources)
task.signatureFile = buildResources.copy("forbidden/third-party-audit.txt")
task.javaHome = project.runtimeJavaHome
@@ -140,10 +141,10 @@ class PrecommitTasks {
}
}
private static Task configureForbiddenApisCli(Project project) {
private static TaskProvider configureForbiddenApisCli(Project project) {
project.pluginManager.apply(ForbiddenApisPlugin)
ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')
project.tasks.withType(CheckForbiddenApis) {
project.tasks.withType(CheckForbiddenApis).configureEach {
dependsOn(buildResources)
doFirst {
// we need to defer this configuration since we don't know the runtime java version until execution time
@@ -183,12 +184,14 @@
)
}
}
Task forbiddenApis = project.tasks.getByName("forbiddenApis")
forbiddenApis.group = ""
TaskProvider forbiddenApis = project.tasks.named("forbiddenApis")
forbiddenApis.configure {
group = ""
}
return forbiddenApis
}
private static Task configureCheckstyle(Project project) {
private static TaskProvider configureCheckstyle(Project project) {
// Always copy the checkstyle configuration files to 'buildDir/checkstyle' since the resources could be located in a jar
// file. If the resources are located in a jar, Gradle will fail when it tries to turn the URL into a file
URL checkstyleConfUrl = PrecommitTasks.getResource("/checkstyle.xml")
@@ -196,29 +199,39 @@ class PrecommitTasks {
File checkstyleDir = new File(project.buildDir, "checkstyle")
File checkstyleSuppressions = new File(checkstyleDir, "checkstyle_suppressions.xml")
File checkstyleConf = new File(checkstyleDir, "checkstyle.xml");
Task copyCheckstyleConf = project.tasks.create("copyCheckstyleConf")
TaskProvider copyCheckstyleConf = project.tasks.register("copyCheckstyleConf")
// configure inputs and outputs so up to date works properly
copyCheckstyleConf.outputs.files(checkstyleSuppressions, checkstyleConf)
copyCheckstyleConf.configure {
outputs.files(checkstyleSuppressions, checkstyleConf)
}
if ("jar".equals(checkstyleConfUrl.getProtocol())) {
JarURLConnection jarURLConnection = (JarURLConnection) checkstyleConfUrl.openConnection()
copyCheckstyleConf.inputs.file(jarURLConnection.getJarFileURL())
copyCheckstyleConf.configure {
inputs.file(jarURLConnection.getJarFileURL())
}
} else if ("file".equals(checkstyleConfUrl.getProtocol())) {
copyCheckstyleConf.inputs.files(checkstyleConfUrl.getFile(), checkstyleSuppressionsUrl.getFile())
}
copyCheckstyleConf.doLast {
checkstyleDir.mkdirs()
// withStream will close the output stream and IOGroovyMethods#getBytes reads the InputStream fully and closes it
new FileOutputStream(checkstyleConf).withStream {
it.write(checkstyleConfUrl.openStream().getBytes())
}
new FileOutputStream(checkstyleSuppressions).withStream {
it.write(checkstyleSuppressionsUrl.openStream().getBytes())
copyCheckstyleConf.configure {
inputs.files(checkstyleConfUrl.getFile(), checkstyleSuppressionsUrl.getFile())
}
}
Task checkstyleTask = project.tasks.create('checkstyle')
copyCheckstyleConf.configure {
doLast {
checkstyleDir.mkdirs()
// withStream will close the output stream and IOGroovyMethods#getBytes reads the InputStream fully and closes it
new FileOutputStream(checkstyleConf).withStream {
it.write(checkstyleConfUrl.openStream().getBytes())
}
new FileOutputStream(checkstyleSuppressions).withStream {
it.write(checkstyleSuppressionsUrl.openStream().getBytes())
}
}
}
TaskProvider checkstyleTask = project.tasks.register('checkstyle') {
dependsOn project.tasks.withType(Checkstyle)
}
// Apply the checkstyle plugin to create `checkstyleMain` and `checkstyleTest`. It only
// creates them if there is main or test code to check and it makes `check` depend
// on them. We also want `precommit` to depend on `checkstyle`.
@@ -231,8 +244,7 @@ class PrecommitTasks {
toolVersion = CHECKSTYLE_VERSION
}
project.tasks.withType(Checkstyle) { task ->
checkstyleTask.dependsOn(task)
project.tasks.withType(Checkstyle).configureEach { task ->
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
task.reports {
@@ -243,13 +255,13 @@ class PrecommitTasks {
return checkstyleTask
}
private static Task configureLoggerUsage(Project project) {
private static TaskProvider configureLoggerUsage(Project project) {
Object dependency = ClasspathUtils.isElasticsearchProject() ? project.project(':test:logger-usage') :
"org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}"
project.configurations.create('loggerUsagePlugin')
project.dependencies.add('loggerUsagePlugin', dependency)
return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) {
return project.tasks.register('loggerUsageCheck', LoggerUsageTask) {
classpath = project.configurations.loggerUsagePlugin
}
}


@@ -0,0 +1,300 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test;
import org.elasticsearch.gradle.BuildPlugin;
import org.elasticsearch.gradle.BwcVersions;
import org.elasticsearch.gradle.DistributionDownloadPlugin;
import org.elasticsearch.gradle.ElasticsearchDistribution;
import org.elasticsearch.gradle.ElasticsearchDistribution.Flavor;
import org.elasticsearch.gradle.ElasticsearchDistribution.Platform;
import org.elasticsearch.gradle.ElasticsearchDistribution.Type;
import org.elasticsearch.gradle.Jdk;
import org.elasticsearch.gradle.JdkDownloadPlugin;
import org.elasticsearch.gradle.Version;
import org.elasticsearch.gradle.VersionProperties;
import org.elasticsearch.gradle.tool.Boilerplate;
import org.elasticsearch.gradle.vagrant.BatsProgressLogger;
import org.elasticsearch.gradle.vagrant.VagrantBasePlugin;
import org.elasticsearch.gradle.vagrant.VagrantExtension;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.Directory;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.provider.Provider;
import org.gradle.api.tasks.Copy;
import org.gradle.api.tasks.TaskInputs;
import org.gradle.api.tasks.TaskProvider;
import org.gradle.api.tasks.testing.Test;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertLinuxPath;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertWindowsPath;
public class DistroTestPlugin implements Plugin<Project> {
private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691";
// all distributions used by distro tests. this is temporary until tests are per distribution
private static final String PACKAGING_DISTRIBUTION = "packaging";
private static final String COPY_PACKAGING_TASK = "copyPackagingArchives";
private static final String IN_VM_SYSPROP = "tests.inVM";
private static Version upgradeVersion;
private Provider<Directory> archivesDir;
private TaskProvider<Copy> copyPackagingArchives;
private Jdk gradleJdk;
@Override
public void apply(Project project) {
project.getPluginManager().apply(JdkDownloadPlugin.class);
project.getPluginManager().apply(DistributionDownloadPlugin.class);
project.getPluginManager().apply(VagrantBasePlugin.class);
project.getPluginManager().apply(JavaPlugin.class);
configureVM(project);
if (upgradeVersion == null) {
// just read this once, since it is the same for all projects. this is safe because gradle configuration is single threaded
upgradeVersion = getUpgradeVersion(project);
}
// setup task to run inside VM
configureDistributions(project);
configureCopyPackagingTask(project);
configureDistroTest(project);
configureBatsTest(project, "oss");
configureBatsTest(project, "default");
}
private static Jdk createJdk(NamedDomainObjectContainer<Jdk> jdksContainer, String name, String version, String platform) {
Jdk jdk = jdksContainer.create(name);
jdk.setVersion(version);
jdk.setPlatform(platform);
return jdk;
}
private static Version getUpgradeVersion(Project project) {
String upgradeFromVersionRaw = System.getProperty("tests.packaging.upgradeVersion");
if (upgradeFromVersionRaw != null) {
return Version.fromString(upgradeFromVersionRaw);
}
// was not passed in, so randomly choose one from bwc versions
ExtraPropertiesExtension extraProperties = project.getExtensions().getByType(ExtraPropertiesExtension.class);
if ((boolean) extraProperties.get("bwc_tests_enabled") == false) {
// Upgrade tests will go from current to current when the BWC tests are disabled to skip real BWC tests
return Version.fromString(project.getVersion().toString());
}
ExtraPropertiesExtension rootExtraProperties = project.getRootProject().getExtensions().getByType(ExtraPropertiesExtension.class);
String firstPartOfSeed = rootExtraProperties.get("testSeed").toString().split(":")[0];
final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16);
BwcVersions bwcVersions = (BwcVersions) extraProperties.get("bwcVersions");
final List<Version> indexCompatVersions = bwcVersions.getIndexCompatible();
return indexCompatVersions.get(new Random(seed).nextInt(indexCompatVersions.size()));
}
private void configureVM(Project project) {
String box = project.getName();
// setup jdks used by the distro tests, and by gradle executing
NamedDomainObjectContainer<Jdk> jdksContainer = JdkDownloadPlugin.getContainer(project);
String platform = box.contains("windows") ? "windows" : "linux";
this.gradleJdk = createJdk(jdksContainer, "gradle", GRADLE_JDK_VERSION, platform);
// setup VM used by these tests
VagrantExtension vagrant = project.getExtensions().getByType(VagrantExtension.class);
vagrant.setBox(box);
vagrant.vmEnv("PATH", convertPath(project, vagrant, gradleJdk, "/bin:$PATH", "\\bin;$Env:PATH"));
vagrant.setIsWindowsVM(box.contains("windows"));
}
private static Object convertPath(Project project, VagrantExtension vagrant, Jdk jdk,
String additionaLinux, String additionalWindows) {
return new Object() {
@Override
public String toString() {
if (vagrant.isWindowsVM()) {
return convertWindowsPath(project, jdk.getPath()) + additionalWindows;
}
return convertLinuxPath(project, jdk.getPath()) + additionaLinux;
}
};
}
private void configureCopyPackagingTask(Project project) {
this.archivesDir = project.getParent().getLayout().getBuildDirectory().dir("packaging/archives");
// temporary, until we have tasks per distribution
this.copyPackagingArchives = Boilerplate.maybeRegister(project.getParent().getTasks(), COPY_PACKAGING_TASK, Copy.class,
t -> {
t.into(archivesDir);
t.from(project.getConfigurations().getByName(PACKAGING_DISTRIBUTION));
Path archivesPath = archivesDir.get().getAsFile().toPath();
// write bwc version, and append -SNAPSHOT if it is an unreleased version
ExtraPropertiesExtension extraProperties = project.getExtensions().getByType(ExtraPropertiesExtension.class);
BwcVersions bwcVersions = (BwcVersions) extraProperties.get("bwcVersions");
final String upgradeFromVersion;
if (bwcVersions.unreleasedInfo(upgradeVersion) != null) {
upgradeFromVersion = upgradeVersion.toString() + "-SNAPSHOT";
} else {
upgradeFromVersion = upgradeVersion.toString();
}
TaskInputs inputs = t.getInputs();
inputs.property("version", VersionProperties.getElasticsearch());
inputs.property("upgrade_from_version", upgradeFromVersion);
// TODO: this is serializable, need to think how to represent this as an input
//inputs.property("bwc_versions", bwcVersions);
t.doLast(action -> {
try {
Files.writeString(archivesPath.resolve("version"), VersionProperties.getElasticsearch());
Files.writeString(archivesPath.resolve("upgrade_from_version"), upgradeFromVersion);
// this is always true, but bats tests rely on it. It is just temporary until bats is removed.
Files.writeString(archivesPath.resolve("upgrade_is_oss"), "");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
});
}
private void configureDistroTest(Project project) {
BuildPlugin.configureCompile(project);
BuildPlugin.configureRepositories(project);
BuildPlugin.configureTestTasks(project);
BuildPlugin.configureInputNormalization(project);
TaskProvider<Test> destructiveTest = project.getTasks().register("destructiveDistroTest", Test.class,
t -> {
t.setMaxParallelForks(1);
t.setWorkingDir(archivesDir.get());
if (System.getProperty(IN_VM_SYSPROP) == null) {
t.dependsOn(copyPackagingArchives, gradleJdk);
}
});
// setup outer task to run
project.getTasks().register("distroTest", GradleDistroTestTask.class,
t -> {
t.setGroup(JavaBasePlugin.VERIFICATION_GROUP);
t.setDescription("Runs distribution tests within vagrant");
t.setTaskName(project.getPath() + ":" + destructiveTest.getName());
t.extraArg("-D'" + IN_VM_SYSPROP + "'");
t.dependsOn(copyPackagingArchives, gradleJdk);
});
}
private void configureBatsTest(Project project, String type) {
// destructive task to run inside
TaskProvider<BatsTestTask> destructiveTest = project.getTasks().register("destructiveBatsTest." + type, BatsTestTask.class,
t -> {
// this is hacky for shared source, but bats are a temporary thing we are removing, so it is not worth
// the overhead of a real project dependency
Directory batsDir = project.getParent().getLayout().getProjectDirectory().dir("bats");
t.setTestsDir(batsDir.dir(type));
t.setUtilsDir(batsDir.dir("utils"));
t.setArchivesDir(archivesDir.get());
t.setPackageName("elasticsearch" + (type.equals("oss") ? "-oss" : ""));
if (System.getProperty(IN_VM_SYSPROP) == null) {
t.dependsOn(copyPackagingArchives, gradleJdk);
}
});
VagrantExtension vagrant = project.getExtensions().getByType(VagrantExtension.class);
// setup outer task to run
project.getTasks().register("batsTest." + type, GradleDistroTestTask.class,
t -> {
t.setGroup(JavaBasePlugin.VERIFICATION_GROUP);
t.setDescription("Runs bats tests within vagrant");
t.setTaskName(project.getPath() + ":" + destructiveTest.getName());
t.setProgressHandler(new BatsProgressLogger(project.getLogger()));
t.extraArg("-D'" + IN_VM_SYSPROP + "'");
t.dependsOn(copyPackagingArchives, gradleJdk);
t.onlyIf(spec -> vagrant.isWindowsVM() == false); // bats doesn't run on windows
});
}
private void configureDistributions(Project project) {
NamedDomainObjectContainer<ElasticsearchDistribution> distributions = DistributionDownloadPlugin.getContainer(project);
for (Type type : Arrays.asList(Type.DEB, Type.RPM)) {
for (Flavor flavor : Flavor.values()) {
for (boolean bundledJdk : Arrays.asList(true, false)) {
addDistro(distributions, type, null, flavor, bundledJdk, VersionProperties.getElasticsearch());
}
}
// upgrade version is always bundled jdk
// NOTE: this is mimicking the old VagrantTestPlugin upgrade behavior. It will eventually be replaced
// with a dedicated upgrade test from every bwc version like other bwc tests
addDistro(distributions, type, null, Flavor.DEFAULT, true, upgradeVersion.toString());
if (upgradeVersion.onOrAfter("6.3.0")) {
addDistro(distributions, type, null, Flavor.OSS, true, upgradeVersion.toString());
}
}
for (Platform platform : Arrays.asList(Platform.LINUX, Platform.WINDOWS)) {
for (Flavor flavor : Flavor.values()) {
for (boolean bundledJdk : Arrays.asList(true, false)) {
addDistro(distributions, Type.ARCHIVE, platform, flavor, bundledJdk, VersionProperties.getElasticsearch());
}
}
}
// temporary until distro tests have one test per distro
Configuration packagingConfig = project.getConfigurations().create(PACKAGING_DISTRIBUTION);
List<Configuration> distroConfigs = distributions.stream().map(ElasticsearchDistribution::getConfiguration)
.collect(Collectors.toList());
packagingConfig.setExtendsFrom(distroConfigs);
}
private static void addDistro(NamedDomainObjectContainer<ElasticsearchDistribution> distributions,
Type type, Platform platform, Flavor flavor, boolean bundledJdk, String version) {
String name = flavor + "-" + (type == Type.ARCHIVE ? platform + "-" : "") + type + (bundledJdk ? "" : "-no-jdk") + "-" + version;
if (distributions.findByName(name) != null) {
return;
}
distributions.create(name, d -> {
d.setFlavor(flavor);
d.setType(type);
if (type == Type.ARCHIVE) {
d.setPlatform(platform);
}
d.setBundledJdk(bundledJdk);
d.setVersion(version);
});
}
}


@@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster
import org.elasticsearch.gradle.testclusters.RestTestRunnerTask
import org.elasticsearch.gradle.testclusters.TestClustersPlugin
import org.elasticsearch.gradle.tool.ClasspathUtils
import org.gradle.api.DefaultTask
@@ -49,8 +50,6 @@ class RestIntegTestTask extends DefaultTask {
protected Test runner
protected Task clusterInit
/** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
List<NodeInfo> nodes
@@ -61,8 +60,6 @@ class RestIntegTestTask extends DefaultTask {
RestIntegTestTask() {
runner = project.tasks.create("${name}Runner", RestTestRunnerTask.class)
super.dependsOn(runner)
clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
runner.dependsOn(clusterInit)
boolean usesTestclusters = project.plugins.hasPlugin(TestClustersPlugin.class)
if (usesTestclusters == false) {
clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project)
@@ -75,8 +72,6 @@ class RestIntegTestTask extends DefaultTask {
runner.useCluster project.testClusters."$name"
}
// override/add more for rest tests
runner.maxParallelForks = 1
runner.include('**/*IT.class')
runner.systemProperty('tests.rest.load_packaged', 'false')
@@ -134,7 +129,6 @@ class RestIntegTestTask extends DefaultTask {
project.gradle.projectsEvaluated {
if (enabled == false) {
runner.enabled = false
clusterInit.enabled = false
return // no need to add cluster formation tasks if the task won't run!
}
if (usesTestclusters == false) {
@@ -185,11 +179,6 @@ class RestIntegTestTask extends DefaultTask {
}
}
@Override
public Task mustRunAfter(Object... tasks) {
clusterInit.mustRunAfter(tasks)
}
public void runner(Closure configure) {
project.tasks.getByName("${name}Runner").configure(configure)
}


@@ -1,54 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.vagrant.VagrantCommandTask
import org.gradle.api.Task
/**
* A fixture for integration tests which runs in a virtual machine launched by Vagrant.
*/
class VagrantFixture extends VagrantCommandTask implements Fixture {
private VagrantCommandTask stopTask
public VagrantFixture() {
this.stopTask = project.tasks.create(name: "${name}#stop", type: VagrantCommandTask) {
command 'halt'
}
finalizedBy this.stopTask
}
@Override
void setBoxName(String boxName) {
super.setBoxName(boxName)
this.stopTask.setBoxName(boxName)
}
@Override
void setEnvironmentVars(Map<String, String> environmentVars) {
super.setEnvironmentVars(environmentVars)
this.stopTask.setEnvironmentVars(environmentVars)
}
@Override
public Task getStopTask() {
return this.stopTask
}
}


@@ -1,86 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant
import org.apache.commons.io.output.TeeOutputStream
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Optional
import org.gradle.internal.logging.progress.ProgressLoggerFactory
import javax.inject.Inject
/**
* Runs a vagrant command. Pretty much like Exec task but with a nicer output
* formatter and defaults to `vagrant` as first part of commandLine.
*/
public class VagrantCommandTask extends LoggedExec {
@Input
String command
@Input @Optional
String subcommand
@Input
String boxName
@Input
Map<String, String> environmentVars
public VagrantCommandTask() {
executable = 'vagrant'
// We're using afterEvaluate here to slot in some logic that captures configurations and
// modifies the command line right before the main execution happens. The reason that we
// call doFirst instead of just doing the work in the afterEvaluate is that the latter
// restricts how subclasses can extend functionality. Calling afterEvaluate is like having
// all the logic of a task happening at construction time, instead of at execution time
// where a subclass can override or extend the logic.
project.afterEvaluate {
doFirst {
if (environmentVars != null) {
environment environmentVars
}
// Build our command line for vagrant
def vagrantCommand = [executable, command]
if (subcommand != null) {
vagrantCommand = vagrantCommand + subcommand
}
commandLine([*vagrantCommand, boxName, *args])
// It'd be nice if --machine-readable were, well, nice
standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
}
}
}
@Inject
ProgressLoggerFactory getProgressLoggerFactory() {
throw new UnsupportedOperationException()
}
protected OutputStream createLoggerOutputStream() {
return new VagrantLoggerOutputStream(getProgressLoggerFactory().newOperation(boxName + " " + command).setDescription(boxName),
/* Vagrant tends to output a lot of stuff, but most of the important
stuff starts with ==> $box */
"==> $boxName: ")
}
}


@@ -1,67 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant
import org.elasticsearch.gradle.Version
import org.gradle.api.tasks.Input
class VagrantPropertiesExtension {
@Input
List<String> boxes
@Input
Version upgradeFromVersion
@Input
List<String> upgradeFromVersions
@Input
String batsDir
@Input
Boolean inheritTests
@Input
Boolean inheritTestUtils
@Input
String testClass
VagrantPropertiesExtension(List<String> availableBoxes) {
this.boxes = availableBoxes
this.batsDir = 'src/test/resources/packaging'
}
void boxes(String... boxes) {
this.boxes = Arrays.asList(boxes)
}
void setBatsDir(String batsDir) {
this.batsDir = batsDir
}
void setInheritTests(Boolean inheritTests) {
this.inheritTests = inheritTests
}
void setInheritTestUtils(Boolean inheritTestUtils) {
this.inheritTestUtils = inheritTestUtils
}
}


@@ -1,127 +0,0 @@
package org.elasticsearch.gradle.vagrant
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.process.ExecResult
import org.gradle.process.internal.ExecException
/**
* Global configuration for if Vagrant tasks are supported in this
* build environment.
*/
class VagrantSupportPlugin implements Plugin<Project> {
@Override
void apply(Project project) {
if (project.rootProject.ext.has('vagrantEnvChecksDone') == false) {
Map vagrantInstallation = getVagrantInstallation(project)
Map virtualBoxInstallation = getVirtualBoxInstallation(project)
project.rootProject.ext.vagrantInstallation = vagrantInstallation
project.rootProject.ext.virtualBoxInstallation = virtualBoxInstallation
project.rootProject.ext.vagrantSupported = vagrantInstallation.supported && virtualBoxInstallation.supported
project.rootProject.ext.vagrantEnvChecksDone = true
// Finding that HOME needs to be set when performing vagrant updates
String homeLocation = System.getenv("HOME")
if (project.rootProject.ext.vagrantSupported && homeLocation == null) {
throw new GradleException("Could not locate \$HOME environment variable. Vagrant is enabled " +
"and requires \$HOME to be set to function properly.")
}
}
addVerifyInstallationTasks(project)
}
private Map getVagrantInstallation(Project project) {
try {
ByteArrayOutputStream pipe = new ByteArrayOutputStream()
ExecResult runResult = project.exec {
commandLine 'vagrant', '--version'
standardOutput pipe
ignoreExitValue true
}
String version = pipe.toString().trim()
if (runResult.exitValue == 0) {
if (version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/ || version ==~ /Vagrant 2\.[0-9]+\.[0-9]+/) {
return [ 'supported' : true ]
} else {
return [ 'supported' : false,
'info' : "Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]" ]
}
} else {
return [ 'supported' : false,
'info' : "Could not read installed vagrant version:\n" + version ]
}
} catch (ExecException e) {
// Exec still throws this if it cannot find the command, regardless of whether ignoreExitValue is set.
// Swallow error. Vagrant isn't installed. Don't halt the build here.
return [ 'supported' : false, 'info' : "Could not find vagrant: " + e.message ]
}
}
private Map getVirtualBoxInstallation(Project project) {
try {
ByteArrayOutputStream pipe = new ByteArrayOutputStream()
ExecResult runResult = project.exec {
commandLine 'vboxmanage', '--version'
standardOutput = pipe
ignoreExitValue true
}
String version = pipe.toString().trim()
if (runResult.exitValue == 0) {
try {
String[] versions = version.split('\\.')
int major = Integer.parseInt(versions[0])
int minor = Integer.parseInt(versions[1])
if ((major < 5) || (major == 5 && minor < 1)) {
return [ 'supported' : false,
'info' : "Illegal version of virtualbox [${version}]. Need [5.1+]" ]
} else {
return [ 'supported' : true ]
}
} catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
return [ 'supported' : false,
'info' : "Unable to parse version of virtualbox [${version}]. Required [5.1+]" ]
}
} else {
return [ 'supported': false, 'info': "Could not read installed virtualbox version:\n" + version ]
}
} catch (ExecException e) {
// Exec still throws this if it cannot find the command, regardless of whether ignoreExitValue is set.
// Swallow error. VirtualBox isn't installed. Don't halt the build here.
return [ 'supported' : false, 'info' : "Could not find virtualbox: " + e.message ]
}
}
private void addVerifyInstallationTasks(Project project) {
createCheckVagrantVersionTask(project)
createCheckVirtualBoxVersionTask(project)
}
private void createCheckVagrantVersionTask(Project project) {
project.tasks.create('vagrantCheckVersion') {
description 'Check the Vagrant version'
group 'Verification'
doLast {
if (project.rootProject.vagrantInstallation.supported == false) {
throw new InvalidUserDataException(project.rootProject.vagrantInstallation.info)
}
}
}
}
private void createCheckVirtualBoxVersionTask(Project project) {
project.tasks.create('virtualboxCheckVersion') {
description 'Check the Virtualbox version'
group 'Verification'
doLast {
if (project.rootProject.virtualBoxInstallation.supported == false) {
throw new InvalidUserDataException(project.rootProject.virtualBoxInstallation.info)
}
}
}
}
}
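
Both probes above follow the same pattern: run the tool with --version, capture stdout, and decide support from the exit code plus a version regex. Below is a minimal standalone sketch of that pattern in plain Java; the tool name, regex, and ProcessBuilder usage are illustrative, not the plugin's actual implementation.

import java.util.regex.Pattern;

public class VersionProbe {
    /** Returns true if `tool --version` exits cleanly and its output matches the accepted pattern. */
    static boolean isSupported(String tool, Pattern accepted) {
        try {
            Process p = new ProcessBuilder(tool, "--version").redirectErrorStream(true).start();
            String output = new String(p.getInputStream().readAllBytes()).trim();
            return p.waitFor() == 0 && accepted.matcher(output).matches();
        } catch (Exception e) {
            // Tool is not installed; report "unsupported" instead of failing the build.
            return false;
        }
    }

    public static void main(String[] args) {
        // Same spirit as the plugin's check: Vagrant 1.8.6+ or any 2.x release.
        Pattern vagrant = Pattern.compile("Vagrant (1\\.(8\\.[6-9]|9\\.\\d+)|2\\.\\d+\\.\\d+)");
        System.out.println("vagrant supported: " + isSupported("vagrant", vagrant));
    }
}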

View File

@ -1,658 +0,0 @@
package org.elasticsearch.gradle.vagrant
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BwcVersions
import org.elasticsearch.gradle.FileContentsTask
import org.elasticsearch.gradle.Jdk
import org.elasticsearch.gradle.JdkDownloadPlugin
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.NamedDomainObjectContainer
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.execution.TaskExecutionAdapter
import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.StopExecutionException
import org.gradle.api.tasks.TaskState
import java.nio.file.Paths
import static java.util.Collections.unmodifiableList
class VagrantTestPlugin implements Plugin<Project> {
/** All Linux boxes that we test. These are all always supplied **/
static final List<String> LINUX_BOXES = unmodifiableList([
'centos-6',
'centos-7',
'debian-8',
'debian-9',
'fedora-28',
'fedora-29',
'oel-6',
'oel-7',
'opensuse-42',
/* TODO: need a real RHEL license now that it is out of beta 'rhel-8',*/
'sles-12',
'ubuntu-1604',
'ubuntu-1804'
])
/** All Windows boxes that we test, which may or may not be supplied **/
static final List<String> WINDOWS_BOXES = unmodifiableList([
'windows-2012r2',
'windows-2016'
])
/** All boxes that we test, some of which may not be supplied **/
static final List<String> ALL_BOXES = unmodifiableList(LINUX_BOXES + WINDOWS_BOXES)
/** Boxes used when sampling the tests **/
static final List<String> SAMPLE = unmodifiableList([
'centos-7',
'ubuntu-1604'
])
/** All distributions to bring into test VM, whether or not they are used **/
static final List<String> DISTRIBUTIONS = unmodifiableList([
'archives:linux-tar',
'archives:oss-linux-tar',
'archives:windows-zip',
'archives:oss-windows-zip',
'packages:rpm',
'packages:oss-rpm',
'packages:deb',
'packages:oss-deb',
'archives:no-jdk-linux-tar',
'archives:oss-no-jdk-linux-tar',
'archives:no-jdk-windows-zip',
'archives:oss-no-jdk-windows-zip',
'packages:no-jdk-rpm',
'packages:oss-no-jdk-rpm',
'packages:no-jdk-deb',
'packages:oss-no-jdk-deb'
])
/** Packages onboarded for upgrade tests **/
static final List<String> UPGRADE_FROM_ARCHIVES = unmodifiableList(['rpm', 'deb'])
private static final PACKAGING_CONFIGURATION = 'packaging'
private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest'
private static final BATS = 'bats'
private static final String BATS_TEST_COMMAND = "cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
/** Boxes that have been supplied and are available for testing **/
List<String> availableBoxes = []
/** extra env vars to pass to vagrant for box configuration **/
Map<String, String> vagrantBoxEnvVars = [:]
private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691"
private Jdk linuxGradleJdk;
private Jdk windowsGradleJdk;
@Override
void apply(Project project) {
project.pluginManager.apply(JdkDownloadPlugin.class)
NamedDomainObjectContainer<Jdk> jdksContainer = (NamedDomainObjectContainer<Jdk>) project.getExtensions().getByName("jdks");
linuxGradleJdk = jdksContainer.create("linux_gradle") {
version = GRADLE_JDK_VERSION
platform = "linux"
}
windowsGradleJdk = jdksContainer.create("windows_gradle") {
version = GRADLE_JDK_VERSION
platform = "windows"
}
collectAvailableBoxes(project)
// Creates the Vagrant extension for the project
project.extensions.create('esvagrant', VagrantPropertiesExtension, listSelectedBoxes(project))
// Add required repositories for packaging tests
configurePackagingArchiveRepositories(project)
// Creates custom configurations for Bats testing files (and associated scripts and archives)
createPackagingConfiguration(project)
project.configurations.create(PACKAGING_TEST_CONFIGURATION)
// Creates all the main Vagrant tasks
createVagrantTasks(project)
if (project.extensions.esvagrant.boxes == null || project.extensions.esvagrant.boxes.size() == 0) {
throw new InvalidUserDataException('Must specify at least one vagrant box')
}
for (String box : project.extensions.esvagrant.boxes) {
if (ALL_BOXES.contains(box) == false) {
throw new InvalidUserDataException("Vagrant box [${box}] is unknown to this plugin. Valid boxes are ${ALL_BOXES}")
}
if (availableBoxes.contains(box) == false) {
throw new InvalidUserDataException("Vagrant box [${box}] is not available because an image is not supplied for it. " +
"Available boxes with supplied images are ${availableBoxes}")
}
}
// Creates all tasks related to the Vagrant boxes
createVagrantBoxesTasks(project)
}
/**
* Enumerate all the boxes that we know about and could possibly choose to test
*/
private void collectAvailableBoxes(Project project) {
// these images are hardcoded in the Vagrantfile and are always available
availableBoxes.addAll(LINUX_BOXES)
// these images need to be provided at runtime
String windows_2012r2_box = project.getProperties().get('vagrant.windows-2012r2.id')
if (windows_2012r2_box != null && windows_2012r2_box.isEmpty() == false) {
availableBoxes.add('windows-2012r2')
vagrantBoxEnvVars['VAGRANT_WINDOWS_2012R2_BOX'] = windows_2012r2_box
}
String windows_2016_box = project.getProperties().get('vagrant.windows-2016.id')
if (windows_2016_box != null && windows_2016_box.isEmpty() == false) {
availableBoxes.add('windows-2016')
vagrantBoxEnvVars['VAGRANT_WINDOWS_2016_BOX'] = windows_2016_box
}
}
/**
* Enumerate all the boxes that we have chosen to test
*/
private static List<String> listSelectedBoxes(Project project) {
String vagrantBoxes = project.getProperties().get('vagrant.boxes', 'sample')
switch (vagrantBoxes) {
case 'sample':
return SAMPLE
case 'linux-all':
return LINUX_BOXES
case 'windows-all':
return WINDOWS_BOXES
case 'all':
return ALL_BOXES
case '':
return []
default:
return vagrantBoxes.split(',')
}
}
private static void configurePackagingArchiveRepositories(Project project) {
RepositoryHandler repos = project.repositories
repos.jcenter() // will have releases before 5.0.0
/* Setup a repository that tries to download from
https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]
which should work for 5.0.0+. This isn't a real ivy repository but gradle
is fine with that */
repos.ivy {
name "elasticsearch"
artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]"
}
}
private static void createPackagingConfiguration(Project project) {
project.configurations.create(PACKAGING_CONFIGURATION)
String upgradeFromVersionRaw = System.getProperty("tests.packaging.upgradeVersion");
Version upgradeFromVersion
if (upgradeFromVersionRaw == null) {
String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0)
final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16)
final def indexCompatVersions = project.bwcVersions.indexCompatible
upgradeFromVersion = indexCompatVersions[new Random(seed).nextInt(indexCompatVersions.size())]
} else {
upgradeFromVersion = Version.fromString(upgradeFromVersionRaw)
}
List<Object> dependencies = new ArrayList<>()
DISTRIBUTIONS.each {
// Adds a dependency for the current version
dependencies.add(project.dependencies.project(path: ":distribution:${it}", configuration: 'default'))
}
if (project.ext.bwc_tests_enabled) {
// The version of elasticsearch that we upgrade *from*
// we only add them as dependencies if the bwc tests are enabled, so we don't trigger builds otherwise
BwcVersions.UnreleasedVersionInfo unreleasedInfo = project.bwcVersions.unreleasedInfo(upgradeFromVersion)
if (unreleasedInfo != null) {
// handle snapshots pointing to bwc build
UPGRADE_FROM_ARCHIVES.each {
dependencies.add(project.dependencies.project(
path: "${unreleasedInfo.gradleProjectPath}", configuration: it))
if (upgradeFromVersion.onOrAfter('6.3.0')) {
dependencies.add(project.dependencies.project(
path: "${unreleasedInfo.gradleProjectPath}", configuration: "oss-${it}"))
}
}
} else {
UPGRADE_FROM_ARCHIVES.each {
// The version of elasticsearch that we upgrade *from*
if (upgradeFromVersion.onOrAfter('7.0.0')) {
String arch = it == "rpm" ? "x86_64" : "amd64"
dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}-${arch}@${it}")
dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}-${arch}@${it}")
} else {
dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}")
if (upgradeFromVersion.onOrAfter('6.3.0')) {
dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}")
}
}
}
}
} else {
// When BWC tests are disabled, upgrade tests go from the current version to the current version, skipping real BWC coverage.
upgradeFromVersion = Version.fromString(project.version)
}
for (Object dependency : dependencies) {
project.dependencies.add(PACKAGING_CONFIGURATION, dependency)
}
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
}
private static void createCleanTask(Project project) {
if (project.tasks.findByName('clean') == null) {
project.tasks.create('clean', Delete.class) {
description 'Clean the project build directory'
group 'Build'
delete project.buildDir
}
}
}
private static void createStopTask(Project project) {
project.tasks.create('stop') {
description 'Stop any tasks from tests that still may be running'
group 'Verification'
}
}
private static void createSmokeTestTask(Project project) {
project.tasks.create('vagrantSmokeTest') {
description 'Smoke test the specified vagrant boxes'
group 'Verification'
}
}
private void createPrepareVagrantTestEnvTask(Project project) {
File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION)
File archivesDir = new File(packagingDir, 'archives')
Copy copyPackagingArchives = project.tasks.create('copyPackagingArchives', Copy) {
into archivesDir
from project.configurations[PACKAGING_CONFIGURATION]
}
File testsDir = new File(packagingDir, 'tests')
Copy copyPackagingTests = project.tasks.create('copyPackagingTests', Copy) {
into testsDir
from project.configurations[PACKAGING_TEST_CONFIGURATION]
}
Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) {
dependsOn copyPackagingTests, linuxGradleJdk
file "${testsDir}/run-tests.sh"
contents """\
if [ "\$#" -eq 0 ]; then
test_args=( "${-> project.extensions.esvagrant.testClass}" )
else
test_args=( "\$@" )
fi
"${-> convertLinuxPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}"
"""
}
Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) {
dependsOn copyPackagingTests, windowsGradleJdk
file "${testsDir}/run-tests.ps1"
// the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely
// a little trappy for those unfamiliar with powershell
contents """\
try {
if (\$args.Count -eq 0) {
\$testArgs = @("${-> project.extensions.esvagrant.testClass}")
} else {
\$testArgs = \$args
}
& "${-> convertWindowsPath(project, windowsGradleJdk.toString()) }/bin/java" -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs
exit \$LASTEXITCODE
} catch {
# catch if we have a failure to even run the script at all above, equivalent to set -e, sort of
echo "\$_.Exception.Message"
exit 1
}
"""
}
Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
dependsOn copyPackagingArchives
file "${archivesDir}/version"
contents project.version
}
Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) {
String version = project.extensions.esvagrant.upgradeFromVersion
if (project.bwcVersions.unreleased.contains(project.extensions.esvagrant.upgradeFromVersion)) {
version += "-SNAPSHOT"
}
dependsOn copyPackagingArchives
file "${archivesDir}/upgrade_from_version"
contents version
}
Task createUpgradeIsOssFile = project.tasks.create('createUpgradeIsOssFile', FileContentsTask) {
dependsOn copyPackagingArchives
doFirst {
project.delete("${archivesDir}/upgrade_is_oss")
if (project.extensions.esvagrant.upgradeFromVersion.before('6.3.0')) {
throw new StopExecutionException("upgrade version is before 6.3.0")
}
}
file "${archivesDir}/upgrade_is_oss"
contents ''
}
File batsDir = new File(packagingDir, BATS)
Copy copyBatsTests = project.tasks.create('copyBatsTests', Copy) {
into "${batsDir}/tests"
from {
"${project.extensions.esvagrant.batsDir}/tests"
}
}
Copy copyBatsUtils = project.tasks.create('copyBatsUtils', Copy) {
into "${batsDir}/utils"
from {
"${project.extensions.esvagrant.batsDir}/utils"
}
}
// Now we iterate over dependencies of the bats configuration. When a project dependency is found,
// we bring back its test files or test utils.
project.afterEvaluate {
project.configurations[PACKAGING_CONFIGURATION].dependencies
.findAll {it.targetConfiguration == PACKAGING_CONFIGURATION }
.each { d ->
if (d instanceof DefaultProjectDependency) {
DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
Project externalBatsProject = externalBatsDependency.dependencyProject
String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir
if (project.extensions.esvagrant.inheritTests) {
copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests"))
}
if (project.extensions.esvagrant.inheritTestUtils) {
copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils"))
}
}
}
}
Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
vagrantSetUpTask.dependsOn(
'vagrantCheckVersion',
copyPackagingArchives,
copyPackagingTests,
createLinuxRunnerScript,
createWindowsRunnerScript,
createVersionFile,
createUpgradeFromFile,
createUpgradeIsOssFile,
copyBatsTests,
copyBatsUtils
)
}
private static void createPackagingTestTask(Project project) {
project.tasks.create('packagingTest') {
group 'Verification'
description "Tests distribution installation on different platforms using vagrant. See TESTING.asciidoc for details."
dependsOn 'vagrantCheckVersion'
}
}
private void createBoxListTasks(Project project) {
project.tasks.create('listAllBoxes') {
group 'Verification'
description 'List all vagrant boxes which can be tested by this plugin'
doLast {
println("All vagrant boxes supported by ${project.path}")
for (String box : ALL_BOXES) {
println(box)
}
}
dependsOn 'vagrantCheckVersion'
}
project.tasks.create('listAvailableBoxes') {
group 'Verification'
description 'List all vagrant boxes which are available for testing'
doLast {
println("All vagrant boxes available to ${project.path}")
for (String box : availableBoxes) {
println(box)
}
}
dependsOn 'vagrantCheckVersion'
}
}
private void createVagrantTasks(Project project) {
createCleanTask(project)
createStopTask(project)
createSmokeTestTask(project)
createPrepareVagrantTestEnvTask(project)
createPackagingTestTask(project)
createBoxListTasks(project)
}
private void createVagrantBoxesTasks(Project project) {
assert project.extensions.esvagrant.boxes != null
assert project.tasks.stop != null
Task stop = project.tasks.stop
assert project.tasks.vagrantSmokeTest != null
Task vagrantSmokeTest = project.tasks.vagrantSmokeTest
assert project.tasks.vagrantCheckVersion != null
Task vagrantCheckVersion = project.tasks.vagrantCheckVersion
assert project.tasks.virtualboxCheckVersion != null
Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
assert project.tasks.setupPackagingTest != null
Task setupPackagingTest = project.tasks.setupPackagingTest
assert project.tasks.packagingTest != null
Task packagingTest = project.tasks.packagingTest
/*
* We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD)
* so that boxes are not duplicated for every Gradle project that uses this VagrantTestPlugin.
*/
def vagrantEnvVars = [
'VAGRANT_CWD' : "${project.rootDir.absolutePath}",
'VAGRANT_VAGRANTFILE' : 'Vagrantfile',
'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}"
]
vagrantEnvVars.putAll(vagrantBoxEnvVars)
// Each box gets its own set of tasks
for (String box : availableBoxes) {
String boxTask = box.capitalize().replace('-', '')
// always add a halt task for all boxes, so clean makes sure they are all shutdown
Task halt = project.tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) {
command 'halt'
boxName box
environmentVars vagrantEnvVars
}
stop.dependsOn(halt)
Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) {
command 'box'
subcommand 'update'
boxName box
environmentVars vagrantEnvVars
dependsOn vagrantCheckVersion, virtualboxCheckVersion
}
update.mustRunAfter(setupPackagingTest)
/*
* Destroying before every execution can be annoying while iterating on tests locally. Therefore, we provide a flag
* vagrant.destroy, defaulting to true, that controls whether or not to destroy any test boxes before test
* execution.
*/
final String vagrantDestroyProperty = project.getProperties().get('vagrant.destroy', 'true')
boolean vagrantDestroy
if ("true".equals(vagrantDestroyProperty)) {
vagrantDestroy = true
} else if ("false".equals(vagrantDestroyProperty)) {
vagrantDestroy = false
} else {
throw new GradleException("[vagrant.destroy] must be [true] or [false] but was [" + vagrantDestroyProperty + "]")
}
/*
* Some versions of Vagrant will fail destroy if the box does not exist. Therefore we check if the box exists before attempting
* to destroy the box.
*/
final Task destroy = project.tasks.create("vagrant${boxTask}#destroy", LoggedExec) {
commandLine "bash", "-c", "vagrant status ${box} | grep -q \"${box}\\s\\+not created\" || vagrant destroy ${box} --force"
workingDir project.rootProject.rootDir
environment vagrantEnvVars
}
destroy.onlyIf { vagrantDestroy }
update.mustRunAfter(destroy)
Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
command 'up'
boxName box
environmentVars vagrantEnvVars
/* We lock the provider to virtualbox because the Vagrantfile specifies
lots of boxes that only work properly in virtualbox. Virtualbox is
vagrant's default but it's possible to change that default and folks do.
But the boxes that we use are unlikely to work properly with other
virtualization providers. Thus the lock. */
args '--provision', '--provider', 'virtualbox'
/* It'd be possible to check if the box is already up here and output
SKIPPED but that would require running vagrant status which is slow! */
dependsOn destroy, update
}
Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) {
environment vagrantEnvVars
dependsOn up
finalizedBy halt
}
vagrantSmokeTest.dependsOn(smoke)
if (LINUX_BOXES.contains(box)) {
smoke.commandLine = ['vagrant', 'ssh', box, '--command',
"set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'"]
} else {
smoke.commandLine = ['vagrant', 'winrm', box, '--command',
"Write-Host ' ${box}: Hello from ${project.path}'"]
}
if (LINUX_BOXES.contains(box)) {
Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) {
remoteCommand BATS_TEST_COMMAND
boxName box
environmentVars vagrantEnvVars
dependsOn up, setupPackagingTest
finalizedBy halt
}
TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path)
batsPackagingTest.doFirst {
project.gradle.addListener(batsPackagingReproListener)
}
batsPackagingTest.doLast {
project.gradle.removeListener(batsPackagingReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
// these tests are temporarily disabled for suse boxes while we debug an issue
// https://github.com/elastic/elasticsearch/issues/30295
if (box.equals("opensuse-42") == false && box.equals("sles-12") == false) {
packagingTest.dependsOn(batsPackagingTest)
}
}
}
Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) {
boxName box
environmentVars vagrantEnvVars
dependsOn up, setupPackagingTest
finalizedBy halt
}
// todo remove this onlyIf after all packaging tests are consolidated
javaPackagingTest.onlyIf {
project.extensions.esvagrant.testClass != null
}
if (LINUX_BOXES.contains(box)) {
javaPackagingTest.command = 'ssh'
javaPackagingTest.args = ['--command', 'sudo bash "$PACKAGING_TESTS/run-tests.sh"']
} else {
// powershell sessions run over winrm always run as administrator, whether --elevated is passed or not. however
// remote sessions have some restrictions on what they can do, such as impersonating another user (or the same user
// without administrator elevation), which we need to do for these tests. passing --elevated runs the session
// as a scheduled job locally on the vm as a true administrator to get around this limitation
//
// https://github.com/hashicorp/vagrant/blob/9c299a2a357fcf87f356bb9d56e18a037a53d138/plugins/communicators/winrm/communicator.rb#L195-L225
// https://devops-collective-inc.gitbooks.io/secrets-of-powershell-remoting/content/manuscript/accessing-remote-computers.html
javaPackagingTest.command = 'winrm'
javaPackagingTest.args = ['--elevated', '--command', '& "$Env:PACKAGING_TESTS/run-tests.ps1"; exit $LASTEXITCODE']
}
TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)
javaPackagingTest.doFirst {
project.gradle.addListener(javaPackagingReproListener)
}
javaPackagingTest.doLast {
project.gradle.removeListener(javaPackagingReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
// these tests are temporarily disabled for suse boxes while we debug an issue
// https://github.com/elastic/elasticsearch/issues/30295
if (box.equals("opensuse-42") == false && box.equals("sles-12") == false) {
packagingTest.dependsOn(javaPackagingTest)
}
}
}
}
private static TaskExecutionAdapter createReproListener(Project project, String reproTaskPath) {
return new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew"
if (state.failure != null) {
println "REPRODUCE WITH: ${gradlew} \"${reproTaskPath}\" -Dtests.seed=${project.testSeed} "
}
}
}
}
// convert the given path from an elasticsearch repo path to a VM path
private String convertLinuxPath(Project project, String path) {
return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path));
}
private String convertWindowsPath(Project project, String path) {
return "C:\\elasticsearch\\" + project.rootDir.toPath().relativize(Paths.get(path)).toString().replace('/', '\\');
}
}
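
The two path converters above rebase files from the host checkout onto the guest's mount point (/elasticsearch on Linux, C:\elasticsearch on Windows). A small sketch of that relativize-and-prefix idea, with made-up host paths:

import java.nio.file.Path;
import java.nio.file.Paths;

public class GuestPaths {
    public static void main(String[] args) {
        Path repoRoot = Paths.get("/home/jenkins/elasticsearch");                     // hypothetical checkout
        Path hostFile = Paths.get("/home/jenkins/elasticsearch/build/jdks/linux-12"); // hypothetical JDK dir
        // Linux guests mount the checkout at /elasticsearch
        String linuxGuest = "/elasticsearch/" + repoRoot.relativize(hostFile);
        // Windows guests mount it at C:\elasticsearch, so also flip the separators
        String windowsGuest = "C:\\elasticsearch\\"
                + repoRoot.relativize(hostFile).toString().replace('/', '\\');
        System.out.println(linuxGuest);   // /elasticsearch/build/jdks/linux-12
        System.out.println(windowsGuest); // C:\elasticsearch\build\jdks\linux-12
    }
}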

View File

@ -203,12 +203,16 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
String extension = distribution.getType().toString();
String classifier = "x86_64";
if (distribution.getType() == Type.ARCHIVE) {
if (distribution.getVersion().before("7.0.0")) {
classifier = null; // no platform specific distros before 7.0
} else if (distribution.getType() == Type.ARCHIVE) {
extension = distribution.getPlatform() == Platform.WINDOWS ? "zip" : "tar.gz";
classifier = distribution.getPlatform() + "-" + classifier;
} else if (distribution.getType() == Type.DEB) {
classifier = "amd64";
}
return FAKE_IVY_GROUP + ":elasticsearch" + (distribution.getFlavor() == Flavor.OSS ? "-oss:" : ":")
+ distribution.getVersion() + ":" + classifier + "@" + extension;
+ distribution.getVersion() + (classifier == null ? "" : ":" + classifier) + "@" + extension;
}
private static Dependency projectDependency(Project project, String projectPath, String projectConfig) {
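
For illustration only, the coordinates this branch produces for a few 7.x cases (statements, not a full class). The group segment is a placeholder because the FAKE_IVY_GROUP constant is defined outside this hunk, and the package extensions are assumed to render in lowercase:

// Hypothetical group; the real FAKE_IVY_GROUP constant lives elsewhere in this class.
String group = "elasticsearch-distribution";
// 7.3.0 default Linux archive: platform-qualified classifier, tar.gz extension
String linuxTar = group + ":elasticsearch:7.3.0:linux-x86_64@tar.gz";
// 7.3.0 OSS Windows archive: zip extension
String windowsZip = group + ":elasticsearch-oss:7.3.0:windows-x86_64@zip";
// 7.3.0 deb package: Debian naming uses amd64 rather than x86_64
String deb = group + ":elasticsearch:7.3.0:amd64@deb";
// Versions before 7.0.0 drop the classifier segment entirely (classifier == null above).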

View File

@ -20,9 +20,7 @@
package org.elasticsearch.gradle;
import org.gradle.api.Buildable;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileTree;
import org.gradle.api.model.ObjectFactory;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.TaskDependency;
@ -30,9 +28,8 @@ import org.gradle.api.tasks.TaskDependency;
import java.io.File;
import java.util.Iterator;
import java.util.Locale;
import java.util.concurrent.Callable;
public class ElasticsearchDistribution implements Buildable {
public class ElasticsearchDistribution implements Buildable, Iterable<File> {
public enum Platform {
LINUX,
@ -93,10 +90,6 @@ public class ElasticsearchDistribution implements Buildable {
return configuration.getBuildDependencies();
}
public FileTree getFileTree(Project project) {
return project.fileTree((Callable<File>) configuration::getSingleFile);
}
@Override
public String toString() {
return configuration.getSingleFile().toString();
@ -190,6 +183,16 @@ public class ElasticsearchDistribution implements Buildable {
return configuration.getBuildDependencies();
}
@Override
public Iterator<File> iterator() {
return configuration.iterator();
}
// TODO: remove this when distro tests are per distribution
public Configuration getConfiguration() {
return configuration;
}
// internal, make this distribution's configuration unmodifiable
void finalizeValues() {

View File

@ -83,9 +83,13 @@ public class Jdk implements Buildable, Iterable<File> {
return configuration;
}
public String getPath() {
return configuration.getSingleFile().toString();
}
@Override
public String toString() {
return configuration.getSingleFile().toString();
return getPath();
}
@Override

View File

@ -48,13 +48,14 @@ import java.util.regex.Matcher;
public class JdkDownloadPlugin implements Plugin<Project> {
private static final String REPO_NAME_PREFIX = "jdk_repo_";
private static final String CONTAINER_NAME = "jdks";
@Override
public void apply(Project project) {
NamedDomainObjectContainer<Jdk> jdksContainer = project.container(Jdk.class, name ->
new Jdk(name, project)
);
project.getExtensions().add("jdks", jdksContainer);
project.getExtensions().add(CONTAINER_NAME, jdksContainer);
project.afterEvaluate(p -> {
for (Jdk jdk : jdksContainer) {
@ -82,6 +83,11 @@ public class JdkDownloadPlugin implements Plugin<Project> {
});
}
@SuppressWarnings("unchecked")
public static NamedDomainObjectContainer<Jdk> getContainer(Project project) {
return (NamedDomainObjectContainer<Jdk>) project.getExtensions().getByName(CONTAINER_NAME);
}
private static void setupRootJdkDownload(Project rootProject, String platform, String version) {
String extractTaskName = "extract" + capitalize(platform) + "Jdk" + version;
// NOTE: this is *horrendous*, but seems to be the only way to check for the existence of a registered task
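
With the container now reachable through the static accessor, other plugins can look it up instead of re-reading the raw extension by name. A minimal sketch from another plugin's apply(Project project); the JDK name is an assumption and the version string is copied from the Vagrant plugin above:

NamedDomainObjectContainer<Jdk> jdks = JdkDownloadPlugin.getContainer(project);
Jdk linuxSystemJdk = jdks.create("linux_system", jdk -> {
    jdk.setVersion("12.0.1+12@69cfe15208a647278a19ef0990eea691");
    jdk.setPlatform("linux");
});
// Tasks can then depend on the download and extraction simply by depending on the Jdk object,
// since Jdk is Buildable and iterates over its extracted files.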

View File

@ -19,6 +19,7 @@
package org.elasticsearch.gradle;
import org.elasticsearch.gradle.tool.ClasspathUtils;
import org.gradle.api.GradleException;
import org.gradle.api.logging.Logger;
import org.gradle.internal.jvm.Jvm;
@ -27,11 +28,16 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class ReaperService {
private static final String REAPER_CLASS = "org/elasticsearch/gradle/reaper/Reaper.class";
private static final Pattern REAPER_JAR_PATH_PATTERN = Pattern.compile("file:(.*)!/" + REAPER_CLASS);
private Logger logger;
private Path buildDir;
private Path inputDir;
@ -103,13 +109,7 @@ public class ReaperService {
private synchronized void ensureReaperStarted() {
if (reaperProcess == null) {
try {
// copy the reaper jar
Path jarPath = buildDir.resolve("reaper").resolve("reaper.jar");
Files.createDirectories(jarPath.getParent());
InputStream jarInput = ReaperPlugin.class.getResourceAsStream("/META-INF/reaper.jar");
try (OutputStream out = Files.newOutputStream(jarPath)) {
jarInput.transferTo(out);
}
Path jarPath = locateReaperJar();
// ensure the input directory exists
Files.createDirectories(inputDir);
@ -134,6 +134,47 @@ public class ReaperService {
}
}
private Path locateReaperJar() {
if (ClasspathUtils.isElasticsearchProject()) {
// when running inside the Elasticsearch build, just find the jar on the runtime classpath
URL main = this.getClass().getClassLoader().getResource(REAPER_CLASS);
String mainPath = main.getFile();
Matcher matcher = REAPER_JAR_PATH_PATTERN.matcher(mainPath);
if (matcher.matches()) {
String path = matcher.group(1);
return Path.of(
OS.<String>conditional()
.onWindows(() -> path.substring(1))
.onUnix(() -> path)
.supply()
);
} else {
throw new RuntimeException("Unable to locate " + REAPER_CLASS + " on build classpath.");
}
} else {
// copy the reaper jar
Path jarPath = buildDir.resolve("reaper").resolve("reaper.jar");
try {
Files.createDirectories(jarPath.getParent());
} catch (IOException e) {
throw new UncheckedIOException("Unable to create reaper JAR output directory " + jarPath.getParent(), e);
}
try (
OutputStream out = Files.newOutputStream(jarPath);
InputStream jarInput = this.getClass().getResourceAsStream("/META-INF/reaper.jar");
) {
logger.info("Copying reaper.jar...");
jarInput.transferTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return jarPath;
}
}
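
The OS.conditional() branch above exists because on Windows the file component of a jar: resource URL carries a leading slash that Path.of cannot digest. A tiny illustration, statements only, with made-up values for matcher.group(1):

String windowsRaw = "/C:/Users/jenkins/.gradle/build-tools.jar"; // hypothetical Windows value
String unixRaw = "/home/jenkins/.gradle/build-tools.jar";        // hypothetical Linux value
Path onWindows = Path.of(windowsRaw.substring(1)); // strip the leading slash: C:/Users/...
Path onUnix = Path.of(unixRaw);                    // already a valid absolute path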
private void ensureReaperAlive() {
if (reaperProcess.isAlive() == false) {
throw new IllegalStateException("Reaper process died unexpectedly! Check the log at " + logFile.toString());

View File

@ -16,30 +16,24 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant
import org.gradle.api.tasks.Input
package org.elasticsearch.gradle;
/**
* Runs bats over vagrant. Pretty much like running it using Exec but with a
* nicer output formatter.
*/
public class BatsOverVagrantTask extends VagrantCommandTask {
import org.gradle.api.GradleException;
@Input
Object remoteCommand
public class Util {
BatsOverVagrantTask() {
command = 'ssh'
}
void setRemoteCommand(Object remoteCommand) {
this.remoteCommand = Objects.requireNonNull(remoteCommand)
setArgs((Iterable<?>) ['--command', remoteCommand])
}
@Override
protected OutputStream createLoggerOutputStream() {
return new TapLoggerOutputStream(logger, getProgressLoggerFactory().newOperation(boxName).setDescription(boxName));
public static boolean getBooleanProperty(String property, boolean defaultValue) {
String propertyValue = System.getProperty(property);
if (propertyValue == null) {
return defaultValue;
}
if ("true".equals(propertyValue)) {
return true;
} else if ("false".equals(propertyValue)) {
return false;
} else {
throw new GradleException("Sysprop [" + property + "] must be [true] or [false] but was [" + propertyValue + "]");
}
}
}
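
A minimal usage sketch for the helper above; the property name is made up:

// -Dtests.fips.enabled=true  -> true
// -Dtests.fips.enabled=yes   -> GradleException, instead of being silently read as false
// property not set           -> falls back to the supplied default
boolean fipsEnabled = Util.getBooleanProperty("tests.fips.enabled", false);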

View File

@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static java.nio.charset.StandardCharsets.UTF_8;
@ -45,6 +46,8 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
File compilerJavaHome = findCompilerJavaHome();
File runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome);
Object gitRevisionResolver = createGitRevisionResolver(project);
final List<JavaHome> javaVersions = new ArrayList<>();
for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.getMajorVersion()); version++) {
if (System.getenv(getJavaHomeEnvVarName(Integer.toString(version))) != null) {
@ -77,7 +80,7 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
project.allprojects(p -> {
// Make sure than any task execution generates and prints build info
p.getTasks().all(task -> {
p.getTasks().configureEach(task -> {
if (task != generateTask && task != printTask) {
task.dependsOn(printTask);
}
@ -92,7 +95,7 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
ext.set("minimumCompilerVersion", minimumCompilerVersion);
ext.set("minimumRuntimeVersion", minimumRuntimeVersion);
ext.set("gradleJavaVersion", Jvm.current().getJavaVersion());
ext.set("gitRevision", gitRevision(project));
ext.set("gitRevision", gitRevisionResolver);
ext.set("buildDate", ZonedDateTime.now(ZoneOffset.UTC));
});
}
@ -203,21 +206,35 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
return _defaultParallel;
}
private String gitRevision(final Project project) {
final ByteArrayOutputStream stdout = new ByteArrayOutputStream();
final ByteArrayOutputStream stderr = new ByteArrayOutputStream();
final ExecResult result = project.exec(spec -> {
spec.setExecutable("git");
spec.setArgs(Arrays.asList("rev-parse", "HEAD"));
spec.setStandardOutput(stdout);
spec.setErrorOutput(stderr);
spec.setIgnoreExitValue(true);
});
private Object createGitRevisionResolver(final Project project) {
return new Object() {
private final AtomicReference<String> gitRevision = new AtomicReference<>();
@Override
public String toString() {
if (gitRevision.get() == null) {
final ByteArrayOutputStream stdout = new ByteArrayOutputStream();
final ByteArrayOutputStream stderr = new ByteArrayOutputStream();
final ExecResult result = project.exec(spec -> {
spec.setExecutable("git");
spec.setArgs(Arrays.asList("rev-parse", "HEAD"));
spec.setStandardOutput(stdout);
spec.setErrorOutput(stderr);
spec.setIgnoreExitValue(true);
});
final String revision;
if (result.getExitValue() != 0) {
revision = "unknown";
} else {
revision = stdout.toString(UTF_8).trim();
}
this.gitRevision.compareAndSet(null, revision);
}
return gitRevision.get();
}
};
if (result.getExitValue() != 0) {
return "unknown";
}
return stdout.toString(UTF_8).trim();
}
}
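
The resolver above is just an Object whose toString() shells out to git at most once, so a build that never interpolates the gitRevision property never pays for the exec. A generic sketch of that memoized-toString idea, with the supplier standing in for the git call (runGitRevParse below is a hypothetical helper):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

static Object lazyToString(Supplier<String> expensive) {
    AtomicReference<String> cached = new AtomicReference<>();
    return new Object() {
        @Override
        public String toString() {
            cached.compareAndSet(null, expensive.get()); // computed on first use only
            return cached.get();
        }
    };
}

// ext.set("gitRevision", lazyToString(() -> runGitRevParse())) would defer the exec until
// something actually formats the value.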

View File

@ -23,6 +23,7 @@ import org.gradle.api.DefaultTask;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.TaskProvider;
import java.io.File;
import java.io.IOException;
@ -40,20 +41,20 @@ public class UpdateShasTask extends DefaultTask {
private final Logger logger = Logging.getLogger(getClass());
/** The parent dependency licenses task to use configuration from */
private DependencyLicensesTask parentTask;
private TaskProvider<DependencyLicensesTask> parentTask;
public UpdateShasTask() {
setDescription("Updates the sha files for the dependencyLicenses check");
setOnlyIf(element -> parentTask.getLicensesDir() != null);
setOnlyIf(element -> parentTask.get().getLicensesDir() != null);
}
@TaskAction
public void updateShas() throws NoSuchAlgorithmException, IOException {
Set<File> shaFiles = parentTask.getShaFiles();
Set<File> shaFiles = parentTask.get().getShaFiles();
for (File dependency : parentTask.getDependencies()) {
for (File dependency : parentTask.get().getDependencies()) {
String jarName = dependency.getName();
File shaFile = parentTask.getShaFile(jarName);
File shaFile = parentTask.get().getShaFile(jarName);
if (shaFile.exists() == false) {
createSha(dependency, jarName, shaFile);
@ -71,16 +72,16 @@ public class UpdateShasTask extends DefaultTask {
private void createSha(File dependency, String jarName, File shaFile) throws IOException, NoSuchAlgorithmException {
logger.lifecycle("Adding sha for " + jarName);
String sha = parentTask.getSha1(dependency);
String sha = parentTask.get().getSha1(dependency);
Files.write(shaFile.toPath(), sha.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE);
}
public DependencyLicensesTask getParentTask() {
return parentTask;
return parentTask.get();
}
public void setParentTask(DependencyLicensesTask parentTask) {
public void setParentTask(TaskProvider<DependencyLicensesTask> parentTask) {
this.parentTask = parentTask;
}
}
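
With the parent now held as a TaskProvider, both tasks can stay unrealized until the task graph needs them. A wiring sketch from a plugin's apply(Project project), with the task names assumed:

TaskProvider<DependencyLicensesTask> dependencyLicenses =
    project.getTasks().register("dependencyLicenses", DependencyLicensesTask.class);
project.getTasks().register("updateShas", UpdateShasTask.class,
    updateShas -> updateShas.setParentTask(dependencyLicenses));
// Neither task is configured, nor is licensesDir resolved, unless something in the graph requires it.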

View File

@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test;
import org.gradle.api.DefaultTask;
import org.gradle.api.file.Directory;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputDirectory;
import org.gradle.api.tasks.TaskAction;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class BatsTestTask extends DefaultTask {
private Directory testsDir;
private Directory utilsDir;
private Directory archivesDir;
private String packageName;
@InputDirectory
public Directory getTestsDir() {
return testsDir;
}
public void setTestsDir(Directory testsDir) {
this.testsDir = testsDir;
}
@InputDirectory
public Directory getUtilsDir() {
return utilsDir;
}
public void setUtilsDir(Directory utilsDir) {
this.utilsDir = utilsDir;
}
@InputDirectory
public Directory getArchivesDir() {
return archivesDir;
}
public void setArchivesDir(Directory archivesDir) {
this.archivesDir = archivesDir;
}
@Input
public String getPackageName() {
return packageName;
}
public void setPackageName(String packageName) {
this.packageName = packageName;
}
@TaskAction
public void runBats() {
List<Object> command = new ArrayList<>();
command.add("bats");
command.add("--tap");
command.addAll(testsDir.getAsFileTree().getFiles().stream()
.filter(f -> f.getName().endsWith(".bats"))
.sorted().collect(Collectors.toList()));
getProject().exec(spec -> {
spec.setWorkingDir(archivesDir.getAsFile());
spec.environment(System.getenv());
spec.environment("BATS_TESTS", testsDir.getAsFile().toString());
spec.environment("BATS_UTILS", utilsDir.getAsFile().toString());
spec.environment("PACKAGE_NAME", packageName);
spec.setCommandLine(command);
});
}
}
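
A registration sketch for the new task type, again from a plugin's apply(Project project); the task name and directory layout are assumptions:

project.getTasks().register("destructiveBatsTest", BatsTestTask.class, task -> {
    task.setTestsDir(project.getLayout().getBuildDirectory().dir("packaging/bats/tests").get());
    task.setUtilsDir(project.getLayout().getBuildDirectory().dir("packaging/bats/utils").get());
    task.setArchivesDir(project.getLayout().getBuildDirectory().dir("packaging/archives").get());
    task.setPackageName("elasticsearch-oss");
});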

View File

@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test;
import org.elasticsearch.gradle.vagrant.VagrantShellTask;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.options.Option;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertLinuxPath;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertWindowsPath;
/**
* Run a gradle task of the current build, within the configured vagrant VM.
*/
public class GradleDistroTestTask extends VagrantShellTask {
private String taskName;
private String testClass;
private List<String> extraArgs = new ArrayList<>();
public void setTaskName(String taskName) {
this.taskName = taskName;
}
@Input
public String getTaskName() {
return taskName;
}
@Option(option = "tests", description = "Sets test class or method name to be included, '*' is supported.")
public void setTestClass(String testClass) {
this.testClass = testClass;
}
@Input
public List<String> getExtraArgs() {
return extraArgs;
}
public void extraArg(String arg) {
this.extraArgs.add(arg);
}
@Override
protected List<String> getWindowsScript() {
return getScript(true);
}
@Override
protected List<String> getLinuxScript() {
return getScript(false);
}
private List<String> getScript(boolean isWindows) {
String cacheDir = getProject().getBuildDir() + "/gradle-cache";
StringBuilder line = new StringBuilder();
line.append(isWindows ? "& .\\gradlew " : "./gradlew ");
line.append(taskName);
line.append(" --project-cache-dir ");
line.append(isWindows ? convertWindowsPath(getProject(), cacheDir) : convertLinuxPath(getProject(), cacheDir));
line.append(" -S");
line.append(" -D'org.gradle.logging.level'=" + getProject().getGradle().getStartParameter().getLogLevel());
if (testClass != null) {
line.append(" --tests=");
line.append(testClass);
}
extraArgs.stream().map(s -> " " + s).forEach(line::append);
return Collections.singletonList(line.toString());
}
}
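
A registration sketch; the task name, the in-VM task path, and the system property are made up:

project.getTasks().register("distroTest.centos-7", GradleDistroTestTask.class, task -> {
    task.setTaskName(":qa:os:destructiveDistroTest"); // the Gradle task to run inside the VM
    task.extraArg("-Dtests.distribution=default");
});

Running `./gradlew distroTest.centos-7 --tests "*Packages*"` would then forward the --tests filter into the in-VM Gradle invocation built by getScript().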

View File

@ -0,0 +1,17 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.DefaultTask;
import java.util.Collection;
import java.util.HashSet;
public class DefaultTestClustersTask extends DefaultTask implements TestClustersAware {
private Collection<ElasticsearchCluster> clusters = new HashSet<>();
@Override
public Collection<ElasticsearchCluster> getClusters() {
return clusters;
}
}

View File

@ -117,6 +117,10 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named {
return clusterName;
}
public String getPath() {
return path;
}
@Override
public void setVersion(String version) {
nodes.all(each -> each.setVersion(version));

View File

@ -216,6 +216,9 @@ public class ElasticsearchNode implements TestClusterConfiguration {
public void plugin(URI plugin) {
requireNonNull(plugin, "Plugin name can't be null");
checkFrozen();
if (plugins.contains(plugin)) {
throw new TestClustersException("Plugin already configured for installation " + plugin);
}
this.plugins.add(plugin);
}

View File

@ -1,12 +1,11 @@
package org.elasticsearch.gradle.test;
package org.elasticsearch.gradle.testclusters;
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Nested;
import org.gradle.api.tasks.testing.Test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import static org.elasticsearch.gradle.testclusters.TestDistribution.INTEG_TEST;
@ -16,9 +15,9 @@ import static org.elasticsearch.gradle.testclusters.TestDistribution.INTEG_TEST;
* {@link Nested} inputs.
*/
@CacheableTask
public class RestTestRunnerTask extends Test {
public class RestTestRunnerTask extends Test implements TestClustersAware {
private Collection<ElasticsearchCluster> clusters = new ArrayList<>();
private Collection<ElasticsearchCluster> clusters = new HashSet<>();
public RestTestRunnerTask() {
super();
@ -26,12 +25,15 @@ public class RestTestRunnerTask extends Test {
task -> clusters.stream().flatMap(c -> c.getNodes().stream()).anyMatch(n -> n.getTestDistribution() != INTEG_TEST));
}
@Override
public int getMaxParallelForks() {
return 1;
}
@Nested
@Override
public Collection<ElasticsearchCluster> getClusters() {
return clusters;
}
public void testCluster(ElasticsearchCluster cluster) {
this.clusters.add(cluster);
}
}

View File

@ -0,0 +1,26 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.Task;
import org.gradle.api.tasks.Nested;
import java.util.Collection;
interface TestClustersAware extends Task {
@Nested
Collection<ElasticsearchCluster> getClusters();
default void useCluster(ElasticsearchCluster cluster) {
if (cluster.getPath().equals(getProject().getPath()) == false) {
throw new TestClustersException(
"Task " + getPath() + " can't use test cluster from" +
" another project " + cluster
);
}
for (ElasticsearchNode node : cluster.getNodes()) {
this.dependsOn(node.getDistribution().getExtracted());
}
getClusters().add(cluster);
}
}
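
Any task type that implements this interface gets the same cluster wiring. A usage sketch from a plugin's apply(Project project), with the cluster and task names assumed:

@SuppressWarnings("unchecked")
NamedDomainObjectContainer<ElasticsearchCluster> testClusters =
    (NamedDomainObjectContainer<ElasticsearchCluster>) project.getExtensions().getByName("testClusters");
ElasticsearchCluster cluster = testClusters.create("integTest");
project.getTasks().register("integTest", RestTestRunnerTask.class, task -> task.useCluster(cluster));
// useCluster() rejects clusters defined in another project and adds a dependency on the
// distribution's extraction for every node in the cluster.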

View File

@ -18,32 +18,22 @@
*/
package org.elasticsearch.gradle.testclusters;
import groovy.lang.Closure;
import org.elasticsearch.gradle.DistributionDownloadPlugin;
import org.elasticsearch.gradle.ElasticsearchDistribution;
import org.elasticsearch.gradle.ReaperPlugin;
import org.elasticsearch.gradle.ReaperService;
import org.elasticsearch.gradle.test.RestTestRunnerTask;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.execution.TaskActionListener;
import org.gradle.api.execution.TaskExecutionListener;
import org.gradle.api.invocation.Gradle;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.tasks.TaskState;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
public class TestClustersPlugin implements Plugin<Project> {
@ -51,12 +41,6 @@ public class TestClustersPlugin implements Plugin<Project> {
public static final String EXTENSION_NAME = "testClusters";
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure";
private final Map<Task, List<ElasticsearchCluster>> usedClusters = new HashMap<>();
private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>();
private final Set<ElasticsearchCluster> runningClusters = new HashSet<>();
private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false"));
private ReaperService reaper;
@ -73,20 +57,22 @@ public class TestClustersPlugin implements Plugin<Project> {
// provide a task to be able to list defined clusters.
createListClustersTask(project, container);
// create DSL for tasks to mark clusters these use
createUseClusterTaskExtension(project, container);
if (project.getRootProject().getExtensions().findByType(TestClustersRegistry.class) == null) {
TestClustersRegistry registry = project.getRootProject().getExtensions()
.create("testClusters", TestClustersRegistry.class);
// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
// we use this information to determine when the last task that required the cluster executed so that we can
// terminate the cluster right away and free up resources.
configureClaimClustersHook(project);
// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
// we use this information to determine when the last task that required the cluster executed so that we can
// terminate the cluster right away and free up resources.
configureClaimClustersHook(project.getGradle(), registry);
// Before each task, we determine if a cluster needs to be started for that task.
configureStartClustersHook(project);
// Before each task, we determine if a cluster needs to be started for that task.
configureStartClustersHook(project.getGradle(), registry);
// After each task we determine if there are clusters that are no longer needed.
configureStopClustersHook(project);
// After each task we determine if there are clusters that are no longer needed.
configureStopClustersHook(project.getGradle(), registry);
}
}
private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project) {
@ -120,78 +106,28 @@ public class TestClustersPlugin implements Plugin<Project> {
);
}
private void createUseClusterTaskExtension(Project project, NamedDomainObjectContainer<ElasticsearchCluster> container) {
// register an extension for all current and future tasks, so that any task can declare that it wants to use a
// specific cluster.
project.getTasks().all((Task task) ->
task.getExtensions().findByType(ExtraPropertiesExtension.class)
.set(
"useCluster",
new Closure<Void>(project, task) {
public void doCall(ElasticsearchCluster cluster) {
if (container.contains(cluster) == false) {
throw new TestClustersException(
"Task " + task.getPath() + " can't use test cluster from" +
" another project " + cluster
);
}
Object thisObject = this.getThisObject();
if (thisObject instanceof Task == false) {
throw new AssertionError("Expected " + thisObject + " to be an instance of " +
"Task, but got: " + thisObject.getClass());
}
usedClusters.computeIfAbsent(task, k -> new ArrayList<>()).add(cluster);
for (ElasticsearchNode node : cluster.getNodes()) {
((Task) thisObject).dependsOn(node.getDistribution().getExtracted());
}
if (thisObject instanceof RestTestRunnerTask) {
((RestTestRunnerTask) thisObject).testCluster(cluster);
}
}
})
);
}
private void configureClaimClustersHook(Project project) {
private static void configureClaimClustersHook(Gradle gradle, TestClustersRegistry registry) {
// Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the
// claims so we'll know when it's safe to stop them.
project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> {
Set<String> forExecution = taskExecutionGraph.getAllTasks().stream()
.map(Task::getPath)
.collect(Collectors.toSet());
usedClusters.forEach((task, listOfClusters) ->
listOfClusters.forEach(elasticsearchCluster -> {
if (forExecution.contains(task.getPath())) {
elasticsearchCluster.freeze();
claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1);
}
}));
if (claimsInventory.isEmpty() == false) {
logger.info("Claims inventory: {}", claimsInventory);
}
gradle.getTaskGraph().whenReady(taskExecutionGraph -> {
taskExecutionGraph.getAllTasks().stream()
.filter(task -> task instanceof TestClustersAware)
.map(task -> (TestClustersAware) task)
.flatMap(task -> task.getClusters().stream())
.forEach(registry::claimCluster);
});
}
private void configureStartClustersHook(Project project) {
project.getGradle().addListener(
private static void configureStartClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(
new TaskActionListener() {
@Override
public void beforeActions(Task task) {
if (task instanceof TestClustersAware == false) {
return;
}
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
List<ElasticsearchCluster> neededButNotRunning = usedClusters.getOrDefault(
task,
Collections.emptyList()
)
.stream()
.filter(cluster -> runningClusters.contains(cluster) == false)
.collect(Collectors.toList());
neededButNotRunning
.forEach(elasticsearchCluster -> {
elasticsearchCluster.start();
runningClusters.add(elasticsearchCluster);
});
((TestClustersAware) task).getClusters().forEach(registry::maybeStartCluster);
}
@Override
public void afterActions(Task task) {}
@ -199,43 +135,18 @@ public class TestClustersPlugin implements Plugin<Project> {
);
}
private void configureStopClustersHook(Project project) {
project.getGradle().addListener(
private static void configureStopClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(
new TaskExecutionListener() {
@Override
public void afterExecute(Task task, TaskState state) {
// always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been
// and caused the cluster to start.
List<ElasticsearchCluster> clustersUsedByTask = usedClusters.getOrDefault(
task,
Collections.emptyList()
);
if (clustersUsedByTask.isEmpty()) {
if (task instanceof TestClustersAware == false) {
return;
}
logger.info("Clusters were used, stopping and releasing permits");
final int permitsToRelease;
if (state.getFailure() != null) {
// If the task fails, and other tasks use this cluster, the other task will likely never be
// executed at all, so we will never be called again to un-claim and terminate it.
clustersUsedByTask.forEach(cluster -> stopCluster(cluster, true));
permitsToRelease = clustersUsedByTask.stream()
.map(cluster -> cluster.getNumberOfNodes())
.reduce(Integer::sum).get();
} else {
clustersUsedByTask.forEach(
cluster -> claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) - 1)
);
List<ElasticsearchCluster> stoppingClusers = claimsInventory.entrySet().stream()
.filter(entry -> entry.getValue() == 0)
.filter(entry -> runningClusters.contains(entry.getKey()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
stoppingClusers.forEach(cluster -> {
stopCluster(cluster, false);
runningClusters.remove(cluster);
});
}
// always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been
// and caused the cluster to start.
((TestClustersAware) task).getClusters()
.forEach(cluster -> registry.stopCluster(cluster, state.getFailure() != null));
}
@Override
public void beforeExecute(Task task) {}
@ -243,25 +154,5 @@ public class TestClustersPlugin implements Plugin<Project> {
);
}
private void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) {
if (allowClusterToSurvive) {
logger.info("Not stopping clusters, disabled by property");
if (taskFailed) {
// task failed or this is the last one to stop
for (int i=1 ; ; i += i) {
logger.lifecycle(
"No more test clusters left to run, going to sleep because {} was set," +
" interrupt (^C) to stop clusters.", TESTCLUSTERS_INSPECT_FAILURE
);
try {
Thread.sleep(1000 * i);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
}
}
}
cluster.stop(taskFailed);
}
}

View File

@ -0,0 +1,66 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class TestClustersRegistry {
private static final Logger logger = Logging.getLogger(TestClustersRegistry.class);
private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure";
private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false"));
private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>();
private final Set<ElasticsearchCluster> runningClusters = new HashSet<>();
public void claimCluster(ElasticsearchCluster cluster) {
cluster.freeze();
claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) + 1);
}
public void maybeStartCluster(ElasticsearchCluster cluster) {
if (runningClusters.contains(cluster)) {
return;
}
runningClusters.add(cluster);
cluster.start();
}
public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) {
if (taskFailed) {
// If the task fails, and other tasks use this cluster, the other task will likely never be
// executed at all, so we will never be called again to un-claim and terminate it.
if (allowClusterToSurvive) {
logger.info("Not stopping clusters, disabled by property");
// task failed or this is the last one to stop
for (int i = 1; ; i += i) {
logger.lifecycle(
"No more test clusters left to run, going to sleep because {} was set," +
" interrupt (^C) to stop clusters.", TESTCLUSTERS_INSPECT_FAILURE
);
try {
Thread.sleep(1000 * i);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
}
} else {
cluster.stop(false);
runningClusters.remove(cluster);
}
} else {
int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1;
claimsInventory.put(cluster, currentClaims);
if (currentClaims <= 0 && runningClusters.contains(cluster)) {
cluster.stop(false);
runningClusters.remove(cluster);
}
}
}
}
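For context, a minimal sketch of the registry lifecycle, assuming cluster is an ElasticsearchCluster attached to a TestClustersAware task (the variable is illustrative, not part of this change):
// configuration time: freeze the cluster config and record one claim
TestClustersRegistry registry = new TestClustersRegistry();
registry.claimCluster(cluster);
// just before the task's actions run: start the cluster only if it is not running yet
registry.maybeStartCluster(cluster);
// after the task executes: drop the claim and stop the cluster once nothing else needs it
registry.stopCluster(cluster, false);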

View File

@ -21,8 +21,12 @@ package org.elasticsearch.gradle.tool;
import org.gradle.api.Action;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.UnknownTaskException;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskContainer;
import org.gradle.api.tasks.TaskProvider;
import java.util.Optional;
@ -37,6 +41,7 @@ public abstract class Boilerplate {
.orElse(collection.create(name));
}
public static <T> T maybeCreate(NamedDomainObjectContainer<T> collection, String name, Action<T> action) {
return Optional.ofNullable(collection.findByName(name))
.orElseGet(() -> {
@ -47,4 +52,45 @@ public abstract class Boilerplate {
}
public static <T extends Task> TaskProvider<T> maybeRegister(TaskContainer tasks, String name, Class<T> clazz, Action<T> action) {
try {
return tasks.named(name, clazz);
} catch (UnknownTaskException e) {
return tasks.register(name, clazz, action);
}
}
public static void maybeConfigure(TaskContainer tasks, String name, Action<? super Task> config) {
TaskProvider<?> task;
try {
task = tasks.named(name);
} catch (UnknownTaskException e) {
return;
}
task.configure(config);
}
public static <T extends Task> void maybeConfigure(
TaskContainer tasks, String name,
Class<? extends T> type,
Action<? super T> config
) {
tasks.withType(type).configureEach(task -> {
if (task.getName().equals(name)) {
config.execute(task);
}
});
}
public static TaskProvider<?> findByName(TaskContainer tasks, String name) {
TaskProvider<?> task;
try {
task = tasks.named(name);
} catch (UnknownTaskException e) {
return null;
}
return task;
}
}
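A minimal sketch of how these helpers might be used from a plugin's apply(Project project) method; the task names below are illustrative, not taken from this change:
// disable the javadoc task only if some other plugin has registered it
Boilerplate.maybeConfigure(project.getTasks(), "javadoc", task -> task.setEnabled(false));
// register a clean-up task lazily, reusing an existing registration when present
Boilerplate.maybeRegister(project.getTasks(), "cleanScratch", Delete.class, t -> t.delete("scratch"));
// look a task up without failing the build when it was never registered
TaskProvider<?> jar = Boilerplate.findByName(project.getTasks(), "jar"); // null if absent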

View File

@ -19,12 +19,10 @@
package org.elasticsearch.gradle.vagrant;
import org.elasticsearch.gradle.LoggingOutputStream;
import org.gradle.api.GradleScriptException;
import org.gradle.api.logging.Logger;
import org.gradle.internal.logging.progress.ProgressLogger;
import java.util.Formatter;
import java.util.function.UnaryOperator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -41,49 +39,43 @@ import java.util.regex.Pattern;
* There is a Tap4j project but we can't use it because it wants to parse the
* entire TAP stream at once and won't parse it stream-wise.
*/
public class TapLoggerOutputStream extends LoggingOutputStream {
public class BatsProgressLogger implements UnaryOperator<String> {
private static final Pattern lineRegex =
Pattern.compile("(?<status>ok|not ok) \\d+(?<skip> # skip (?<skipReason>\\(.+\\))?)? \\[(?<suite>.+)\\] (?<test>.+)");
private static final Pattern startRegex = Pattern.compile("1..(\\d+)");
private final Logger logger;
private final ProgressLogger progressLogger;
private boolean isStarted = false;
private int testsCompleted = 0;
private int testsFailed = 0;
private int testsSkipped = 0;
private Integer testCount;
private String countsFormat;
TapLoggerOutputStream(Logger logger, ProgressLogger progressLogger) {
public BatsProgressLogger(Logger logger) {
this.logger = logger;
this.progressLogger = progressLogger;
}
@Override
public void logLine(String line) {
if (isStarted == false) {
progressLogger.started("started");
isStarted = true;
}
public String apply(String line) {
if (testCount == null) {
try {
int lastDot = line.lastIndexOf('.');
testCount = Integer.parseInt(line.substring(lastDot + 1));
int length = String.valueOf(testCount).length();
String count = "%0" + length + "d";
countsFormat = "[" + count +"|" + count + "|" + count + "/" + count + "]";
return;
} catch (Exception e) {
throw new GradleScriptException("Error parsing first line of TAP stream!!", e);
Matcher m = startRegex.matcher(line);
if (m.matches() == false) {
// haven't reached start of bats test yet, pass through whatever we see
return line;
}
testCount = Integer.parseInt(m.group(1));
int length = String.valueOf(testCount).length();
String count = "%0" + length + "d";
countsFormat = "[" + count +"|" + count + "|" + count + "/" + count + "]";
return null;
}
Matcher m = lineRegex.matcher(line);
if (m.matches() == false) {
/* These might be failure report lines or comments or whatever. It's hard
to tell and it doesn't matter. */
logger.warn(line);
return;
return null;
}
boolean skipped = m.group("skip") != null;
boolean success = skipped == false && m.group("status").equals("ok");
@ -104,15 +96,9 @@ public class TapLoggerOutputStream extends LoggingOutputStream {
}
String counts = new Formatter().format(countsFormat, testsCompleted, testsFailed, testsSkipped, testCount).out().toString();
progressLogger.progress("BATS " + counts + ", " + status + " [" + suiteName + "] " + testName);
if (success == false) {
logger.warn(line);
}
}
@Override
public void close() {
flush();
progressLogger.completed();
return "BATS " + counts + ", " + status + " [" + suiteName + "] " + testName;
}
}
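A minimal sketch of the new stream-wise behaviour, assuming the logger is wired in as a progress handler (the TAP lines are illustrative):
BatsProgressLogger bats = new BatsProgressLogger(Logging.getLogger("bats"));
bats.apply("1..12");                                   // plan line: records the test count, emits no progress (returns null)
String update = bats.apply("ok 3 [packaging] install works"); // matched test line: returns a one-line BATS progress summary
bats.apply("some stray diagnostic output");            // unmatched line: logged as a warning, returns null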

View File

@ -0,0 +1,147 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant;
import org.elasticsearch.gradle.ReaperPlugin;
import org.elasticsearch.gradle.ReaperService;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.execution.TaskActionListener;
import org.gradle.api.execution.TaskExecutionListener;
import org.gradle.api.tasks.TaskState;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
public class VagrantBasePlugin implements Plugin<Project> {
@Override
public void apply(Project project) {
project.getRootProject().getPluginManager().apply(VagrantSetupCheckerPlugin.class);
project.getRootProject().getPluginManager().apply(VagrantManagerPlugin.class);
project.getRootProject().getPluginManager().apply(ReaperPlugin.class);
ReaperService reaper = project.getRootProject().getExtensions().getByType(ReaperService.class);
VagrantExtension extension = project.getExtensions().create("vagrant", VagrantExtension.class, project);
VagrantMachine service = project.getExtensions().create("vagrantService", VagrantMachine.class, project, extension, reaper);
project.getGradle().getTaskGraph().whenReady(graph ->
service.refs = graph.getAllTasks().stream()
.filter(t -> t instanceof VagrantShellTask)
.filter(t -> t.getProject() == project)
.count());
}
/**
* Check the vagrant and virtualbox versions if any vagrant test tasks will be run.
*/
static class VagrantSetupCheckerPlugin implements Plugin<Project> {
private static final Pattern VAGRANT_VERSION = Pattern.compile("Vagrant (\\d+\\.\\d+\\.\\d+)");
private static final Pattern VIRTUAL_BOX_VERSION = Pattern.compile("(\\d+\\.\\d+)");
@Override
public void apply(Project project) {
if (project != project.getRootProject()) {
throw new IllegalArgumentException("VagrantSetupCheckerPlugin can only be applied to the root project of a build");
}
project.getGradle().getTaskGraph().whenReady(graph -> {
boolean needsVagrant = graph.getAllTasks().stream().anyMatch(t -> t instanceof VagrantShellTask);
if (needsVagrant) {
checkVersion(project, "vagrant", VAGRANT_VERSION, 1, 8, 6);
checkVersion(project, "vboxmanage", VIRTUAL_BOX_VERSION, 5, 1);
}
});
}
void checkVersion(Project project, String tool, Pattern versionRegex, int... minVersion) {
ByteArrayOutputStream pipe = new ByteArrayOutputStream();
project.exec(spec -> {
spec.setCommandLine(tool, "--version");
spec.setStandardOutput(pipe);
});
String output = pipe.toString(StandardCharsets.UTF_8).trim();
Matcher matcher = versionRegex.matcher(output);
if (matcher.find() == false) {
throw new IllegalStateException(tool +
" version output [" + output + "] did not match regex [" + versionRegex.pattern() + "]");
}
String version = matcher.group(1);
List<Integer> versionParts = Stream.of(version.split("\\.")).map(Integer::parseInt).collect(Collectors.toList());
for (int i = 0; i < minVersion.length; ++i) {
int found = versionParts.get(i);
if (found > minVersion[i]) {
break; // most significant version is good
} else if (found < minVersion[i]) {
throw new IllegalStateException("Unsupported version of " + tool + ". Found [" + version + "], expected [" +
IntStream.of(minVersion).mapToObj(String::valueOf).collect(Collectors.joining(".")) + "+]");
} // else equal, so check next element
}
}
}
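The comparison walks the version parts most-significant first: it passes as soon as a part exceeds the minimum and fails as soon as one falls below it. A standalone sketch of that rule, with illustrative values:
int[] found = {6, 0};  // e.g. vboxmanage reports 6.0
int[] min = {5, 1};    // required minimum 5.1
boolean supported = true;
for (int i = 0; i < min.length; i++) {
    if (found[i] > min[i]) {
        break;             // 6 > 5: newer on the most significant part, later parts are irrelevant
    } else if (found[i] < min[i]) {
        supported = false; // older than required
        break;
    }                      // equal: fall through and compare the next part
}
// supported stays true here; a found version of 5.0 would fail on the second part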
/**
* Adds global hooks to manage destroying, starting and updating VMs.
*/
static class VagrantManagerPlugin implements Plugin<Project>, TaskActionListener, TaskExecutionListener {
@Override
public void apply(Project project) {
if (project != project.getRootProject()) {
throw new IllegalArgumentException("VagrantManagerPlugin can only be applied to the root project of a build");
}
project.getGradle().addListener(this);
}
private void callIfVagrantTask(Task task, Consumer<VagrantMachine> method) {
if (task instanceof VagrantShellTask) {
VagrantMachine service = task.getProject().getExtensions().getByType(VagrantMachine.class);
method.accept(service);
}
}
@Override
public void beforeExecute(Task task) { /* nothing to do */}
@Override
public void afterActions(Task task) { /* nothing to do */ }
@Override
public void beforeActions(Task task) {
callIfVagrantTask(task, VagrantMachine::maybeStartVM);
}
@Override
public void afterExecute(Task task, TaskState state) {
callIfVagrantTask(task, service -> service.maybeStopVM(state.getFailure() != null));
}
}
}

View File

@ -0,0 +1,93 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant;
import org.gradle.api.Project;
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.provider.MapProperty;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.Input;
import java.io.File;
import java.util.Map;
public class VagrantExtension {
private final Property<String> box;
private final MapProperty<String, Object> hostEnv;
private final MapProperty<String, Object> vmEnv;
private final RegularFileProperty vagrantfile;
private boolean isWindowsVM;
public VagrantExtension(Project project) {
this.box = project.getObjects().property(String.class);
this.hostEnv = project.getObjects().mapProperty(String.class, Object.class);
this.vmEnv = project.getObjects().mapProperty(String.class, Object.class);
this.vagrantfile = project.getObjects().fileProperty();
this.vagrantfile.convention(project.getRootProject().getLayout().getProjectDirectory().file("Vagrantfile"));
this.isWindowsVM = false;
}
@Input
public String getBox() {
return box.get();
}
public void setBox(String box) {
// TODO: should verify this against the Vagrantfile, but would need to do so in afterEvaluate once vagrantfile is unmodifiable
this.box.set(box);
}
@Input
public Map<String, Object> getHostEnv() {
return hostEnv.get();
}
public void hostEnv(String name, Object value) {
hostEnv.put(name, value);
}
@Input
public Map<String, Object> getVmEnv() {
return vmEnv.get();
}
public void vmEnv(String name, Object value) {
vmEnv.put(name, value);
}
@Input
public boolean isWindowsVM() {
return isWindowsVM;
}
public void setIsWindowsVM(boolean isWindowsVM) {
this.isWindowsVM = isWindowsVM;
}
@Input
public File getVagrantfile() {
return this.vagrantfile.get().getAsFile();
}
public void setVagrantfile(File file) {
vagrantfile.set(file);
}
}
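A minimal configuration sketch from build logic, assuming the vagrant base plugin has been applied; the box name and environment values are placeholders:
VagrantExtension vagrant = project.getExtensions().getByType(VagrantExtension.class);
vagrant.setBox("ubuntu-1804");                             // must match a box defined in the Vagrantfile
vagrant.hostEnv("VAGRANT_DEFAULT_PROVIDER", "virtualbox"); // environment passed to the vagrant process on the host
vagrant.vmEnv("SYSTEM_JAVA_HOME", "/usr/lib/jvm/java-11"); // environment exported inside the VM
vagrant.setIsWindowsVM(false);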

View File

@ -0,0 +1,210 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant;
import org.apache.commons.io.output.TeeOutputStream;
import org.elasticsearch.gradle.LoggedExec;
import org.elasticsearch.gradle.LoggingOutputStream;
import org.elasticsearch.gradle.ReaperService;
import org.elasticsearch.gradle.Util;
import org.gradle.api.Action;
import org.gradle.api.Project;
import org.gradle.internal.logging.progress.ProgressLogger;
import org.gradle.internal.logging.progress.ProgressLoggerFactory;
import javax.inject.Inject;
import java.io.File;
import java.io.OutputStream;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.UnaryOperator;
/**
* A helper to manage a vagrant box.
*
* This is created alongside a {@link VagrantExtension} for a project to manage starting and
* stopping a single vagrant box.
*/
public class VagrantMachine {
private final Project project;
private final VagrantExtension extension;
private final ReaperService reaper;
// pkg private so plugin can set this after construction
long refs;
private boolean isVMStarted = false;
public VagrantMachine(Project project, VagrantExtension extension, ReaperService reaper) {
this.project = project;
this.extension = extension;
this.reaper = reaper;
}
@Inject
protected ProgressLoggerFactory getProgressLoggerFactory() {
throw new UnsupportedOperationException();
}
public void execute(Action<VagrantExecSpec> action) {
VagrantExecSpec vagrantSpec = new VagrantExecSpec();
action.execute(vagrantSpec);
Objects.requireNonNull(vagrantSpec.command);
LoggedExec.exec(project, execSpec -> {
execSpec.setExecutable("vagrant");
File vagrantfile = extension.getVagrantfile();
execSpec.setEnvironment(System.getenv()); // pass through env
execSpec.environment("VAGRANT_CWD", vagrantfile.getParentFile().toString());
execSpec.environment("VAGRANT_VAGRANTFILE", vagrantfile.getName());
execSpec.environment("VAGRANT_LOG", "debug");
extension.getHostEnv().forEach(execSpec::environment);
execSpec.args(vagrantSpec.command);
if (vagrantSpec.subcommand != null) {
execSpec.args(vagrantSpec.subcommand);
}
execSpec.args(extension.getBox());
if (vagrantSpec.args != null) {
execSpec.args(Arrays.asList(vagrantSpec.args));
}
UnaryOperator<String> progressHandler = vagrantSpec.progressHandler;
if (progressHandler == null) {
progressHandler = new VagrantProgressLogger("==> " + extension.getBox() + ": ");
}
OutputStream output = execSpec.getStandardOutput();
// output from vagrant needs to be manually curated because --machine-readable isn't actually "readable"
OutputStream progressStream = new ProgressOutputStream(vagrantSpec.command, progressHandler);
execSpec.setStandardOutput(new TeeOutputStream(output, progressStream));
});
}
// start the configured VM if it hasn't been started yet
void maybeStartVM() {
if (isVMStarted) {
return;
}
execute(spec -> {
spec.setCommand("box");
spec.setSubcommand("update");
});
// Destroying before every execution can be annoying while iterating on tests locally. Therefore, we provide a flag that defaults
// to true that can be used to control whether or not to destroy any test boxes before test execution.
boolean destroyVM = Util.getBooleanProperty("vagrant.destroy", true);
if (destroyVM) {
execute(spec -> {
spec.setCommand("destroy");
spec.setArgs("--force");
});
}
// register box to be shutdown if gradle dies
reaper.registerCommand(extension.getBox(), "vagrant", "halt", "-f", extension.getBox());
// We lock the provider to virtualbox because the Vagrantfile specifies lots of boxes that only work
// properly in virtualbox. Virtualbox is vagrant's default, but it's possible to change that default and folks do.
execute(spec -> {
spec.setCommand("up");
spec.setArgs("--provision", "--provider", "virtualbox");
});
isVMStarted = true;
}
// stops the VM if refs are down to 0, or force was called
void maybeStopVM(boolean force) {
assert refs >= 1;
this.refs--;
if ((refs == 0 || force) && isVMStarted) {
execute(spec -> spec.setCommand("halt"));
reaper.unregister(extension.getBox());
}
}
// convert the given path from an elasticsearch repo path to a VM path
public static String convertLinuxPath(Project project, String path) {
return "/elasticsearch/" + project.getRootDir().toPath().relativize(Paths.get(path));
}
public static String convertWindowsPath(Project project, String path) {
return "C:\\elasticsearch\\" + project.getRootDir().toPath().relativize(Paths.get(path)).toString().replace('/', '\\');
}
public static class VagrantExecSpec {
private String command;
private String subcommand;
private String[] args;
private UnaryOperator<String> progressHandler;
private VagrantExecSpec() {}
public void setCommand(String command) {
this.command = command;
}
public void setSubcommand(String subcommand) {
this.subcommand = subcommand;
}
public void setArgs(String... args) {
this.args = args;
}
/**
* A function to translate output from the vagrant command execution to the progress line.
*
* The function takes the current line of output from vagrant, and returns a new
* progress line, or {@code null} if there is no update.
*/
public void setProgressHandler(UnaryOperator<String> progressHandler) {
this.progressHandler = progressHandler;
}
}
private class ProgressOutputStream extends LoggingOutputStream {
private ProgressLogger progressLogger;
private UnaryOperator<String> progressHandler;
ProgressOutputStream(String command, UnaryOperator<String> progressHandler) {
this.progressHandler = progressHandler;
this.progressLogger = getProgressLoggerFactory().newOperation("vagrant");
progressLogger.start(extension.getBox() + "> " + command, "hello");
}
@Override
protected void logLine(String line) {
String progress = progressHandler.apply(line);
if (progress != null) {
progressLogger.progress(progress);
}
System.out.println(line);
}
@Override
public void close() {
progressLogger.completed();
}
}
}
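A minimal sketch of driving the service from build logic (the shell command is illustrative); bringing the box up and halting it is handled by the VagrantManagerPlugin hooks shown earlier, so a plain execute call only needs the command:
VagrantMachine vagrant = project.getExtensions().getByType(VagrantMachine.class);
vagrant.execute(spec -> {
    spec.setCommand("ssh");
    spec.setArgs("--command", "echo hello from inside the box");
});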

View File

@ -19,30 +19,23 @@
package org.elasticsearch.gradle.vagrant;
import org.elasticsearch.gradle.LoggingOutputStream;
import org.gradle.internal.logging.progress.ProgressLogger;
import java.util.function.UnaryOperator;
public class VagrantProgressLogger implements UnaryOperator<String> {
public class VagrantLoggerOutputStream extends LoggingOutputStream {
private static final String HEADING_PREFIX = "==> ";
private final ProgressLogger progressLogger;
private final String squashedPrefix;
private boolean isStarted = false;
private String lastLine = "";
private boolean inProgressReport = false;
private String heading = "";
private boolean inProgressReport = false;
VagrantLoggerOutputStream(ProgressLogger progressLogger, String squashedPrefix) {
this.progressLogger = progressLogger;
public VagrantProgressLogger(String squashedPrefix) {
this.squashedPrefix = squashedPrefix;
}
@Override
protected void logLine(String line) {
if (isStarted == false) {
progressLogger.started("started");
isStarted = true;
}
public String apply(String line) {
if (line.startsWith("\r\u001b")) {
/* We don't want to try to be a full terminal emulator but we want to
keep the escape sequences from leaking and catch _some_ of the
@ -51,7 +44,7 @@ public class VagrantLoggerOutputStream extends LoggingOutputStream {
if ("[K".equals(line)) {
inProgressReport = true;
}
return;
return null;
}
if (line.startsWith(squashedPrefix)) {
line = line.substring(squashedPrefix.length());
@ -67,14 +60,8 @@ public class VagrantLoggerOutputStream extends LoggingOutputStream {
inProgressReport = false;
line = lastLine + line;
} else {
return;
return null;
}
progressLogger.progress(line);
}
@Override
public void close() {
flush();
progressLogger.completed();
return line;
}
}

View File

@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant;
import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.TaskAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertLinuxPath;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertWindowsPath;
/**
* A shell script to run within a vagrant VM.
*
* The script is run as root within the VM.
*/
public abstract class VagrantShellTask extends DefaultTask {
private final VagrantExtension extension;
private final VagrantMachine service;
private UnaryOperator<String> progressHandler = UnaryOperator.identity();
public VagrantShellTask() {
extension = getProject().getExtensions().findByType(VagrantExtension.class);
if (extension == null) {
throw new IllegalStateException("elasticsearch.vagrant-base must be applied to create " + getClass().getName());
}
service = getProject().getExtensions().getByType(VagrantMachine.class);
}
@Input
protected abstract List<String> getWindowsScript();
@Input
protected abstract List<String> getLinuxScript();
@Input
public UnaryOperator<String> getProgressHandler() {
return progressHandler;
}
public void setProgressHandler(UnaryOperator<String> progressHandler) {
this.progressHandler = progressHandler;
}
@TaskAction
public void runScript() {
String rootDir = getProject().getRootDir().toString();
if (extension.isWindowsVM()) {
service.execute(spec -> {
spec.setCommand("winrm");
List<String> script = new ArrayList<>();
script.add("try {");
script.add("cd " + convertWindowsPath(getProject(), rootDir));
extension.getVmEnv().forEach((k, v) -> script.add("$Env:" + k + " = \"" + v + "\""));
script.addAll(getWindowsScript().stream().map(s -> " " + s).collect(Collectors.toList()));
script.addAll(Arrays.asList(
" exit $LASTEXITCODE",
"} catch {",
// catch if we have a failure to even run the script at all above, equivalent to set -e, sort of
" echo $_.Exception.Message",
" exit 1",
"}"));
spec.setArgs("--elevated", "--command", String.join("\n", script));
spec.setProgressHandler(progressHandler);
});
} else {
service.execute(spec -> {
spec.setCommand("ssh");
List<String> script = new ArrayList<>();
script.add("sudo bash -c '"); // start inline bash script
script.add("pwd");
script.add("cd " + convertLinuxPath(getProject(), rootDir));
extension.getVmEnv().forEach((k, v) -> script.add("export " + k + "=" + v));
script.addAll(getLinuxScript());
script.add("'"); // end inline bash script
spec.setArgs("--command", String.join("\n", script));
spec.setProgressHandler(progressHandler);
});
}
}
}
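A minimal sketch of a concrete subclass; the script bodies are illustrative and the class assumes the same package and imports as VagrantShellTask above:
public class EchoDistroTask extends VagrantShellTask {
    @Override
    protected List<String> getLinuxScript() {
        return Arrays.asList("echo linux: $(uname -a)");
    }

    @Override
    protected List<String> getWindowsScript() {
        return Arrays.asList("echo windows: $Env:COMPUTERNAME");
    }
}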

View File

@ -0,0 +1,20 @@
#
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
implementation-class=org.elasticsearch.gradle.test.DistroTestPlugin

View File

@ -0,0 +1 @@
implementation-class=org.elasticsearch.gradle.ReaperPlugin

View File

@ -1 +0,0 @@
implementation-class=org.elasticsearch.gradle.vagrant.VagrantTestPlugin

View File

@ -1 +0,0 @@
implementation-class=org.elasticsearch.gradle.vagrant.VagrantSupportPlugin

View File

@ -16,10 +16,13 @@ import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
/**
* A wrapper around gradle's Exec task to capture output and log on error.
@ -28,7 +31,7 @@ import java.util.function.Function;
public class LoggedExec extends Exec {
private Consumer<Logger> outputLogger;
public LoggedExec() {
if (getLogger().isInfoEnabled() == false) {
@ -98,6 +101,8 @@ public class LoggedExec extends Exec {
return genericExec(project, project::javaexec, action);
}
private static final Pattern NEWLINE = Pattern.compile(System.lineSeparator());
private static <T extends BaseExecSpec> ExecResult genericExec(
Project project,
Function<Action<T>,ExecResult> function,
@ -107,19 +112,20 @@ public class LoggedExec extends Exec {
return function.apply(action);
}
ByteArrayOutputStream output = new ByteArrayOutputStream();
ByteArrayOutputStream error = new ByteArrayOutputStream();
try {
return function.apply(spec -> {
spec.setStandardOutput(output);
spec.setErrorOutput(error);
spec.setErrorOutput(output);
action.execute(spec);
try {
output.write(("Output for " + spec.getExecutable() + ":").getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
} catch (Exception e) {
try {
project.getLogger().error("Standard output:");
project.getLogger().error(output.toString("UTF-8"));
project.getLogger().error("Standard error:");
project.getLogger().error(error.toString("UTF-8"));
NEWLINE.splitAsStream(output.toString("UTF-8")).forEach(s -> project.getLogger().error("| " + s));
} catch (UnsupportedEncodingException ue) {
throw new GradleException("Failed to read exec output", ue);
}
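A minimal usage sketch of the wrapper (the executable and arguments are illustrative): when it captures output rather than streaming it, this change merges stdout and stderr into one stream and, on failure, replays each captured line prefixed with "| ".
LoggedExec.exec(project, spec -> {
    spec.setExecutable("vagrant");
    spec.args("--version");
});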

View File

@ -63,12 +63,12 @@ public class DistributionDownloadPluginIT extends GradleIntegrationTestCase {
Files.newInputStream(Paths.get("src/testKit/distribution-download/distribution/files/fake_elasticsearch.zip"))) {
filebytes = stream.readAllBytes();
}
String urlPath = "/downloads/elasticsearch/elasticsearch-1.0.0-windows-x86_64.zip";
String urlPath = "/downloads/elasticsearch/elasticsearch-7.0.0-windows-x86_64.zip";
wireMock.stubFor(head(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200)));
wireMock.stubFor(get(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200).withBody(filebytes)));
wireMock.start();
assertExtractedDistro("1.0.0", "archive", "windows", null, null,
assertExtractedDistro("7.0.0", "archive", "windows", null, null,
"tests.download_service", wireMock.baseUrl());
} catch (Exception e) {
// for debugging

View File

@ -0,0 +1,21 @@
package org.elasticsearch.gradle;
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.junit.Before;
public class ReaperPluginIT extends GradleIntegrationTestCase {
private GradleRunner runner;
@Before
public void setup() {
runner = getGradleRunner("reaper");
}
public void testCanLaunchReaper() {
BuildResult result = runner.withArguments(":launchReaper", "-S", "--info").build();
assertTaskSuccessful(result, ":launchReaper");
assertOutputContains(result.getOutput(), "Copying reaper.jar...");
}
}

View File

@ -1,11 +1,13 @@
package org.elasticsearch.gradle.precommit;
import org.elasticsearch.gradle.test.GradleUnitTestCase;
import org.gradle.api.Action;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Dependency;
import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.tasks.TaskProvider;
import org.gradle.testfixtures.ProjectBuilder;
import org.junit.Before;
import org.junit.Rule;
@ -31,7 +33,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
private UpdateShasTask updateShas;
private DependencyLicensesTask task;
private TaskProvider<DependencyLicensesTask> task;
private Project project;
@ -51,7 +53,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
expectedException.expectMessage(containsString("exists, but there are no dependencies"));
getLicensesDir(project).mkdir();
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -60,12 +62,12 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
expectedException.expectMessage(containsString("does not exist, but there are dependencies"));
project.getDependencies().add("compile", dependency);
task.checkDependencies();
task.get().checkDependencies();
}
@Test
public void givenProjectWithoutLicensesDirNorDependenciesThenShouldReturnSilently() throws Exception {
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -78,7 +80,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createFileIn(licensesDir, "groovy-all-NOTICE.txt", "");
project.getDependencies().add("compile", project.getDependencies().localGroovy());
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -90,7 +92,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
getLicensesDir(project).mkdir();
updateShas.updateShas();
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -103,7 +105,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createFileIn(getLicensesDir(project), "groovy-all-LICENSE.txt", "");
updateShas.updateShas();
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -113,7 +115,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
File licensesDir = getLicensesDir(project);
createAllDefaultDependencyFiles(licensesDir, "groovy-all");
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -127,7 +129,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createAllDefaultDependencyFiles(licensesDir, "groovy-all");
createFileIn(licensesDir, "non-declared-LICENSE.txt", "");
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -141,7 +143,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createAllDefaultDependencyFiles(licensesDir, "groovy-all");
createFileIn(licensesDir, "non-declared-NOTICE.txt", "");
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -155,7 +157,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createAllDefaultDependencyFiles(licensesDir, "groovy-all");
createFileIn(licensesDir, "non-declared.sha1", "");
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -175,7 +177,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
Files.write(groovySha, new byte[] { 1 }, StandardOpenOption.CREATE);
task.checkDependencies();
task.get().checkDependencies();
}
@Test
@ -189,8 +191,8 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
mappings.put("from", "groovy-all");
mappings.put("to", "groovy");
task.mapping(mappings);
task.checkDependencies();
task.get().mapping(mappings);
task.get().checkDependencies();
}
@Test
@ -201,8 +203,8 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
createFileIn(licensesDir, "groovy-all-LICENSE.txt", "");
createFileIn(licensesDir, "groovy-all-NOTICE.txt", "");
task.ignoreSha("groovy-all");
task.checkDependencies();
task.get().ignoreSha("groovy-all");
task.get().checkDependencies();
}
@Test
@ -210,7 +212,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
expectedException.expect(GradleException.class);
expectedException.expectMessage(containsString("isn't a valid directory"));
task.getShaFiles();
task.get().getShaFiles();
}
private Project createProject() {
@ -244,7 +246,7 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
Files.write(file, content.getBytes(StandardCharsets.UTF_8));
}
private UpdateShasTask createUpdateShasTask(Project project, DependencyLicensesTask dependencyLicensesTask) {
private UpdateShasTask createUpdateShasTask(Project project, TaskProvider<DependencyLicensesTask> dependencyLicensesTask) {
UpdateShasTask task = project.getTasks()
.register("updateShas", UpdateShasTask.class)
.get();
@ -253,12 +255,15 @@ public class DependencyLicensesTaskTests extends GradleUnitTestCase {
return task;
}
private DependencyLicensesTask createDependencyLicensesTask(Project project) {
DependencyLicensesTask task = project.getTasks()
.register("dependencyLicenses", DependencyLicensesTask.class)
.get();
private TaskProvider<DependencyLicensesTask> createDependencyLicensesTask(Project project) {
TaskProvider<DependencyLicensesTask> task = project.getTasks()
.register("dependencyLicenses", DependencyLicensesTask.class, new Action<DependencyLicensesTask>() {
@Override
public void execute(DependencyLicensesTask dependencyLicensesTask) {
dependencyLicensesTask.setDependencies(getDependencies(project));
}
});
task.setDependencies(getDependencies(project));
return task;
}

View File

@ -2,11 +2,13 @@ package org.elasticsearch.gradle.precommit;
import org.apache.commons.io.FileUtils;
import org.elasticsearch.gradle.test.GradleUnitTestCase;
import org.gradle.api.Action;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Dependency;
import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.tasks.TaskProvider;
import org.gradle.testfixtures.ProjectBuilder;
import org.junit.Before;
import org.junit.Rule;
@ -125,12 +127,15 @@ public class UpdateShasTaskTests extends GradleUnitTestCase {
return task;
}
private DependencyLicensesTask createDependencyLicensesTask(Project project) {
DependencyLicensesTask task = project.getTasks()
.register("dependencyLicenses", DependencyLicensesTask.class)
.get();
private TaskProvider<DependencyLicensesTask> createDependencyLicensesTask(Project project) {
TaskProvider<DependencyLicensesTask> task = project.getTasks()
.register("dependencyLicenses", DependencyLicensesTask.class, new Action<DependencyLicensesTask>() {
@Override
public void execute(DependencyLicensesTask dependencyLicensesTask) {
dependencyLicensesTask.setDependencies(getDependencies(project));
}
});
task.setDependencies(getDependencies(project));
return task;
}

View File

@ -0,0 +1,11 @@
plugins {
id 'elasticsearch.reaper'
}
task launchReaper {
doLast {
def reaper = project.extensions.getByName('reaper')
reaper.registerCommand('test', 'true')
reaper.unregister('test')
}
}

View File

@ -20,7 +20,7 @@ slf4j = 1.6.2
# when updating the JNA version, also update the version in buildSrc/build.gradle
jna = 4.5.1
netty = 4.1.36.Final
netty = 4.1.38.Final
joda = 2.10.2
# when updating this version, you need to ensure compatibility with:

View File

@ -55,7 +55,7 @@ public class NoopPlugin extends Plugin implements ActionPlugin {
IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster) {
return Arrays.asList(
new RestNoopBulkAction(settings, restController),
new RestNoopSearchAction(settings, restController));
new RestNoopBulkAction(restController),
new RestNoopSearchAction(restController));
}
}

View File

@ -27,7 +27,6 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.BaseRestHandler;
@ -45,9 +44,8 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT;
import static org.elasticsearch.rest.RestStatus.OK;
public class RestNoopBulkAction extends BaseRestHandler {
public RestNoopBulkAction(Settings settings, RestController controller) {
super(settings);
public RestNoopBulkAction(RestController controller) {
controller.registerHandler(POST, "/_noop_bulk", this);
controller.registerHandler(PUT, "/_noop_bulk", this);
controller.registerHandler(POST, "/{index}/_noop_bulk", this);

View File

@ -20,20 +20,17 @@ package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import java.io.IOException;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;
public class RestNoopSearchAction extends BaseRestHandler {
public RestNoopSearchAction(Settings settings, RestController controller) {
super(settings);
public RestNoopSearchAction(RestController controller) {
controller.registerHandler(GET, "/_noop_search", this);
controller.registerHandler(POST, "/_noop_search", this);
controller.registerHandler(GET, "/{index}/_noop_search", this);
@ -48,7 +45,7 @@ public class RestNoopSearchAction extends BaseRestHandler {
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) {
SearchRequest searchRequest = new SearchRequest();
return channel -> client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel));
}

View File

@ -19,10 +19,8 @@
package org.elasticsearch.client.dataframe.transforms;
import org.elasticsearch.client.core.IndexerState;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
@ -33,16 +31,14 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
public class DataFrameTransformCheckpointStats {
public static final ParseField CHECKPOINT = new ParseField("checkpoint");
public static final ParseField INDEXER_STATE = new ParseField("indexer_state");
public static final ParseField POSITION = new ParseField("position");
public static final ParseField CHECKPOINT_PROGRESS = new ParseField("checkpoint_progress");
public static final ParseField TIMESTAMP_MILLIS = new ParseField("timestamp_millis");
public static final ParseField TIME_UPPER_BOUND_MILLIS = new ParseField("time_upper_bound_millis");
public static final DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, null, null, null, 0L, 0L);
public static final DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, null, null, 0L, 0L);
private final long checkpoint;
private final IndexerState indexerState;
private final DataFrameIndexerPosition position;
private final DataFrameTransformProgress checkpointProgress;
private final long timestampMillis;
@ -51,19 +47,16 @@ public class DataFrameTransformCheckpointStats {
public static final ConstructingObjectParser<DataFrameTransformCheckpointStats, Void> LENIENT_PARSER = new ConstructingObjectParser<>(
"data_frame_transform_checkpoint_stats", true, args -> {
long checkpoint = args[0] == null ? 0L : (Long) args[0];
IndexerState indexerState = (IndexerState) args[1];
DataFrameIndexerPosition position = (DataFrameIndexerPosition) args[2];
DataFrameTransformProgress checkpointProgress = (DataFrameTransformProgress) args[3];
long timestamp = args[4] == null ? 0L : (Long) args[4];
long timeUpperBound = args[5] == null ? 0L : (Long) args[5];
DataFrameIndexerPosition position = (DataFrameIndexerPosition) args[1];
DataFrameTransformProgress checkpointProgress = (DataFrameTransformProgress) args[2];
long timestamp = args[3] == null ? 0L : (Long) args[3];
long timeUpperBound = args[4] == null ? 0L : (Long) args[4];
return new DataFrameTransformCheckpointStats(checkpoint, indexerState, position, checkpointProgress, timestamp, timeUpperBound);
return new DataFrameTransformCheckpointStats(checkpoint, position, checkpointProgress, timestamp, timeUpperBound);
});
static {
LENIENT_PARSER.declareLong(optionalConstructorArg(), CHECKPOINT);
LENIENT_PARSER.declareField(optionalConstructorArg(), p -> IndexerState.fromString(p.text()), INDEXER_STATE,
ObjectParser.ValueType.STRING);
LENIENT_PARSER.declareObject(optionalConstructorArg(), DataFrameIndexerPosition.PARSER, POSITION);
LENIENT_PARSER.declareObject(optionalConstructorArg(), DataFrameTransformProgress.PARSER, CHECKPOINT_PROGRESS);
LENIENT_PARSER.declareLong(optionalConstructorArg(), TIMESTAMP_MILLIS);
@ -74,11 +67,10 @@ public class DataFrameTransformCheckpointStats {
return LENIENT_PARSER.parse(parser, null);
}
public DataFrameTransformCheckpointStats(final long checkpoint, final IndexerState indexerState,
final DataFrameIndexerPosition position, final DataFrameTransformProgress checkpointProgress,
final long timestampMillis, final long timeUpperBoundMillis) {
public DataFrameTransformCheckpointStats(final long checkpoint, final DataFrameIndexerPosition position,
final DataFrameTransformProgress checkpointProgress, final long timestampMillis,
final long timeUpperBoundMillis) {
this.checkpoint = checkpoint;
this.indexerState = indexerState;
this.position = position;
this.checkpointProgress = checkpointProgress;
this.timestampMillis = timestampMillis;
@ -89,10 +81,6 @@ public class DataFrameTransformCheckpointStats {
return checkpoint;
}
public IndexerState getIndexerState() {
return indexerState;
}
public DataFrameIndexerPosition getPosition() {
return position;
}
@ -111,7 +99,7 @@ public class DataFrameTransformCheckpointStats {
@Override
public int hashCode() {
return Objects.hash(checkpoint, indexerState, position, checkpointProgress, timestampMillis, timeUpperBoundMillis);
return Objects.hash(checkpoint, position, checkpointProgress, timestampMillis, timeUpperBoundMillis);
}
@Override
@ -127,7 +115,6 @@ public class DataFrameTransformCheckpointStats {
DataFrameTransformCheckpointStats that = (DataFrameTransformCheckpointStats) other;
return this.checkpoint == that.checkpoint
&& Objects.equals(this.indexerState, that.indexerState)
&& Objects.equals(this.position, that.position)
&& Objects.equals(this.checkpointProgress, that.checkpointProgress)
&& this.timestampMillis == that.timestampMillis

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
@ -33,20 +34,20 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
public class DataFrameTransformStats {
public static final ParseField ID = new ParseField("id");
public static final ParseField TASK_STATE_FIELD = new ParseField("task_state");
public static final ParseField STATE_FIELD = new ParseField("state");
public static final ParseField REASON_FIELD = new ParseField("reason");
public static final ParseField NODE_FIELD = new ParseField("node");
public static final ParseField STATS_FIELD = new ParseField("stats");
public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing");
public static final ConstructingObjectParser<DataFrameTransformStats, Void> PARSER = new ConstructingObjectParser<>(
"data_frame_transform_state_and_stats_info", true,
a -> new DataFrameTransformStats((String) a[0], (DataFrameTransformTaskState) a[1], (String) a[2],
(NodeAttributes) a[3], (DataFrameIndexerTransformStats) a[4], (DataFrameTransformCheckpointingInfo) a[5]));
"data_frame_transform_state_and_stats_info", true,
a -> new DataFrameTransformStats((String) a[0], (State) a[1], (String) a[2],
(NodeAttributes) a[3], (DataFrameIndexerTransformStats) a[4], (DataFrameTransformCheckpointingInfo) a[5]));
static {
PARSER.declareString(constructorArg(), ID);
PARSER.declareField(optionalConstructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE_FIELD,
PARSER.declareField(optionalConstructorArg(), p -> State.fromString(p.text()), STATE_FIELD,
ObjectParser.ValueType.STRING);
PARSER.declareString(optionalConstructorArg(), REASON_FIELD);
PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE_FIELD, ObjectParser.ValueType.OBJECT);
@ -61,16 +62,15 @@ public class DataFrameTransformStats {
private final String id;
private final String reason;
private final DataFrameTransformTaskState taskState;
private final State state;
private final NodeAttributes node;
private final DataFrameIndexerTransformStats indexerStats;
private final DataFrameTransformCheckpointingInfo checkpointingInfo;
public DataFrameTransformStats(String id, DataFrameTransformTaskState taskState, String reason, NodeAttributes node,
DataFrameIndexerTransformStats stats,
public DataFrameTransformStats(String id, State state, String reason, NodeAttributes node, DataFrameIndexerTransformStats stats,
DataFrameTransformCheckpointingInfo checkpointingInfo) {
this.id = id;
this.taskState = taskState;
this.state = state;
this.reason = reason;
this.node = node;
this.indexerStats = stats;
@ -81,8 +81,8 @@ public class DataFrameTransformStats {
return id;
}
public DataFrameTransformTaskState getTaskState() {
return taskState;
public State getState() {
return state;
}
public String getReason() {
@ -103,7 +103,7 @@ public class DataFrameTransformStats {
@Override
public int hashCode() {
return Objects.hash(id, taskState, reason, node, indexerStats, checkpointingInfo);
return Objects.hash(id, state, reason, node, indexerStats, checkpointingInfo);
}
@Override
@ -119,10 +119,23 @@ public class DataFrameTransformStats {
DataFrameTransformStats that = (DataFrameTransformStats) other;
return Objects.equals(this.id, that.id)
&& Objects.equals(this.taskState, that.taskState)
&& Objects.equals(this.state, that.state)
&& Objects.equals(this.reason, that.reason)
&& Objects.equals(this.node, that.node)
&& Objects.equals(this.indexerStats, that.indexerStats)
&& Objects.equals(this.checkpointingInfo, that.checkpointingInfo);
}
public enum State {
STARTED, INDEXING, ABORTING, STOPPING, STOPPED, FAILED;
public static State fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public String value() {
return name().toLowerCase(Locale.ROOT);
}
}
}
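A minimal sketch of the new client-side state enum round-trip (the value is illustrative):
DataFrameTransformStats.State state = DataFrameTransformStats.State.fromString("stopped");
assert state == DataFrameTransformStats.State.STOPPED;
assert "stopped".equals(state.value());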

View File

@ -44,7 +44,6 @@ import org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformSt
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.client.dataframe.transforms.DestConfig;
import org.elasticsearch.client.dataframe.transforms.SourceConfig;
import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig;
@ -306,10 +305,11 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id),
client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
assertThat(statsResponse.getTransformsStats(), hasSize(1));
DataFrameTransformTaskState taskState = statsResponse.getTransformsStats().get(0).getTaskState();
DataFrameTransformStats.State taskState = statsResponse.getTransformsStats().get(0).getState();
// Since we are non-continuous, the transform could auto-stop between being started earlier and us gathering the statistics
assertThat(taskState, is(oneOf(DataFrameTransformTaskState.STARTED, DataFrameTransformTaskState.STOPPED)));
assertThat(taskState, oneOf(DataFrameTransformStats.State.STARTED, DataFrameTransformStats.State.INDEXING,
DataFrameTransformStats.State.STOPPING, DataFrameTransformStats.State.STOPPED));
StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null);
StopDataFrameTransformResponse stopResponse =
@ -321,8 +321,8 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
// Calling stop with wait_for_completion assures that we will be in the `STOPPED` state for the transform task
statsResponse = execute(new GetDataFrameTransformStatsRequest(id),
client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
taskState = statsResponse.getTransformsStats().get(0).getTaskState();
assertThat(taskState, is(DataFrameTransformTaskState.STOPPED));
taskState = statsResponse.getTransformsStats().get(0).getState();
assertThat(taskState, is(DataFrameTransformStats.State.STOPPED));
}
@SuppressWarnings("unchecked")
@ -405,7 +405,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
assertEquals(1, statsResponse.getTransformsStats().size());
DataFrameTransformStats stats = statsResponse.getTransformsStats().get(0);
assertEquals(DataFrameTransformTaskState.STOPPED, stats.getTaskState());
assertEquals(DataFrameTransformStats.State.STOPPED, stats.getState());
DataFrameIndexerTransformStats zeroIndexerStats = new DataFrameIndexerTransformStats(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
assertEquals(zeroIndexerStats, stats.getIndexerStats());
@ -420,8 +420,8 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
DataFrameTransformStats stateAndStats = response.getTransformsStats().get(0);
assertNotEquals(zeroIndexerStats, stateAndStats.getIndexerStats());
assertThat(stateAndStats.getTaskState(),
is(oneOf(DataFrameTransformTaskState.STARTED, DataFrameTransformTaskState.STOPPED)));
assertThat(stateAndStats.getState(), oneOf(DataFrameTransformStats.State.STARTED, DataFrameTransformStats.State.INDEXING,
DataFrameTransformStats.State.STOPPING, DataFrameTransformStats.State.STOPPED));
assertThat(stateAndStats.getReason(), is(nullValue()));
});
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.client.dataframe.transforms;
import org.elasticsearch.client.core.IndexerState;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
@ -41,7 +40,6 @@ public class DataFrameTransformCheckpointStatsTests extends ESTestCase {
public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000),
randomBoolean() ? null : randomFrom(IndexerState.values()),
randomBoolean() ? null : DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(),
randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance(),
randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000));
@ -50,9 +48,6 @@ public class DataFrameTransformCheckpointStatsTests extends ESTestCase {
public static void toXContent(DataFrameTransformCheckpointStats stats, XContentBuilder builder) throws IOException {
builder.startObject();
builder.field(DataFrameTransformCheckpointStats.CHECKPOINT.getPreferredName(), stats.getCheckpoint());
if (stats.getIndexerState() != null) {
builder.field(DataFrameTransformCheckpointStats.INDEXER_STATE.getPreferredName(), stats.getIndexerState().value());
}
if (stats.getPosition() != null) {
builder.field(DataFrameTransformCheckpointStats.POSITION.getPreferredName());
DataFrameIndexerPositionTests.toXContent(stats.getPosition(), builder);

View File

@ -41,7 +41,7 @@ public class DataFrameTransformStatsTests extends ESTestCase {
public static DataFrameTransformStats randomInstance() {
return new DataFrameTransformStats(randomAlphaOfLength(10),
randomBoolean() ? null : randomFrom(DataFrameTransformTaskState.values()),
randomBoolean() ? null : randomFrom(DataFrameTransformStats.State.values()),
randomBoolean() ? null : randomAlphaOfLength(100),
randomBoolean() ? null : NodeAttributesTests.createRandom(),
DataFrameIndexerTransformStatsTests.randomStats(),
@ -51,9 +51,9 @@ public class DataFrameTransformStatsTests extends ESTestCase {
public static void toXContent(DataFrameTransformStats stats, XContentBuilder builder) throws IOException {
builder.startObject();
builder.field(DataFrameTransformStats.ID.getPreferredName(), stats.getId());
if (stats.getTaskState() != null) {
builder.field(DataFrameTransformStats.TASK_STATE_FIELD.getPreferredName(),
stats.getTaskState().value());
if (stats.getState() != null) {
builder.field(DataFrameTransformStats.STATE_FIELD.getPreferredName(),
stats.getState().value());
}
if (stats.getReason() != null) {
builder.field(DataFrameTransformStats.REASON_FIELD.getPreferredName(), stats.getReason());

View File

@ -22,7 +22,6 @@ package org.elasticsearch.client.dataframe.transforms.hlrc;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import java.io.IOException;
import java.util.function.Predicate;
@ -34,7 +33,6 @@ public class DataFrameTransformCheckpointStatsTests extends AbstractHlrcXContent
public static DataFrameTransformCheckpointStats fromHlrc(
org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) {
return new DataFrameTransformCheckpointStats(instance.getCheckpoint(),
(instance.getIndexerState() != null) ? IndexerState.fromString(instance.getIndexerState().value()) : null,
DataFrameIndexerPositionTests.fromHlrc(instance.getPosition()),
DataFrameTransformProgressTests.fromHlrc(instance.getCheckpointProgress()),
instance.getTimestampMillis(),
@ -55,7 +53,6 @@ public class DataFrameTransformCheckpointStatsTests extends AbstractHlrcXContent
public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000),
randomBoolean() ? null : randomFrom(IndexerState.values()),
DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(),
randomBoolean() ? null : DataFrameTransformProgressTests.randomDataFrameTransformProgress(),
randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000));

View File

@ -26,9 +26,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheck
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import java.io.IOException;
import java.util.HashMap;
@ -50,7 +48,7 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase<D
fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats instance) {
return new DataFrameTransformStats(instance.getId(),
DataFrameTransformTaskState.fromString(instance.getTaskState().value()),
DataFrameTransformStats.State.fromString(instance.getState().value()),
instance.getReason(),
fromHlrc(instance.getNode()),
DataFrameIndexerTransformStatsTests.fromHlrc(instance.getIndexerStats()),
@ -67,7 +65,7 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase<D
public DataFrameTransformStats convertHlrcToInternal(
org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats instance) {
return new DataFrameTransformStats(instance.getId(),
DataFrameTransformTaskState.fromString(instance.getTaskState().value()),
DataFrameTransformStats.State.fromString(instance.getState().value()),
instance.getReason(),
fromHlrc(instance.getNode()),
DataFrameIndexerTransformStatsTests.fromHlrc(instance.getIndexerStats()),
@ -76,7 +74,7 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase<D
public static DataFrameTransformStats randomDataFrameTransformStats() {
return new DataFrameTransformStats(randomAlphaOfLength(10),
randomFrom(DataFrameTransformTaskState.values()),
randomFrom(DataFrameTransformStats.State.values()),
randomBoolean() ? null : randomAlphaOfLength(100),
randomBoolean() ? null : randomNodeAttributes(),
randomStats(),
@ -111,7 +109,6 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase<D
public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000),
randomBoolean() ? null : randomFrom(IndexerState.values()),
DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(),
randomBoolean() ? null : DataFrameTransformProgressTests.randomDataFrameTransformProgress(),
randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000));

View File

@ -25,7 +25,6 @@ import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.core.IndexerState;
import org.elasticsearch.client.core.PageParams;
import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest;
import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest;
@ -46,7 +45,6 @@ import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats;
import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.client.dataframe.transforms.DestConfig;
import org.elasticsearch.client.dataframe.transforms.NodeAttributes;
import org.elasticsearch.client.dataframe.transforms.QueryConfig;
@ -622,24 +620,21 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest
assertThat(response.getTransformsStats(), hasSize(1));
// tag::get-data-frame-transform-stats-response
DataFrameTransformStats stateAndStatsInfo =
DataFrameTransformStats stats =
response.getTransformsStats().get(0); // <1>
DataFrameTransformTaskState taskState =
stateAndStatsInfo.getTaskState(); // <2>
IndexerState indexerState =
stateAndStatsInfo.getCheckpointingInfo()
.getNext().getIndexerState(); // <3>
DataFrameIndexerTransformStats transformStats =
stateAndStatsInfo.getIndexerStats(); // <4>
DataFrameTransformStats.State state =
stats.getState(); // <2>
DataFrameIndexerTransformStats indexerStats =
stats.getIndexerStats(); // <3>
DataFrameTransformProgress progress =
stateAndStatsInfo.getCheckpointingInfo()
.getNext().getCheckpointProgress(); // <5>
stats.getCheckpointingInfo()
.getNext().getCheckpointProgress(); // <4>
NodeAttributes node =
stateAndStatsInfo.getNode(); // <6>
stats.getNode(); // <5>
// end::get-data-frame-transform-stats-response
assertEquals(DataFrameTransformTaskState.STOPPED, taskState);
assertNotNull(transformStats);
assertEquals(DataFrameTransformStats.State.STOPPED, state);
assertNotNull(indexerStats);
assertNull(progress);
}
{

View File

@ -1,8 +1,7 @@
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin
import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.test.fixtures'
@ -58,7 +57,7 @@ project.ext {
}
from(project.projectDir.toPath().resolve("src/docker/Dockerfile")) {
MavenFilteringHack.filter(it, expansions(oss, local))
expand(expansions(oss, local))
}
}
}
@ -66,7 +65,9 @@ project.ext {
void addCopyDockerContextTask(final boolean oss) {
task(taskName("copy", oss, "DockerContext"), type: Sync) {
inputs.properties(expansions(oss, true))
expansions(oss, true).each { k, v ->
inputs.property(k, { v.toString() })
}
into files(oss)
with dockerBuildContext(oss, true)

View File

@ -13,7 +13,7 @@
FROM centos:7 AS builder
ENV PATH /usr/share/elasticsearch/bin:$PATH
ENV PATH /usr/share/elasticsearch/bin:\$PATH
RUN groupadd -g 1000 elasticsearch && \
adduser -u 1000 -g 1000 -d /usr/share/elasticsearch elasticsearch
@ -41,8 +41,8 @@ ENV ELASTIC_CONTAINER true
RUN for iter in {1..10}; do yum update -y && \
yum install -y nc && \
yum clean all && exit_code=0 && break || exit_code=$? && echo "yum error: retry $iter in 10s" && sleep 10; done; \
(exit $exit_code)
yum clean all && exit_code=0 && break || exit_code=\$? && echo "yum error: retry \$iter in 10s" && sleep 10; done; \
(exit \$exit_code)
RUN groupadd -g 1000 elasticsearch && \
adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \
@ -57,7 +57,7 @@ COPY --from=builder --chown=1000:0 /usr/share/elasticsearch /usr/share/elasticse
# REF: https://github.com/elastic/elasticsearch-docker/issues/171
RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts
ENV PATH /usr/share/elasticsearch/bin:$PATH
ENV PATH /usr/share/elasticsearch/bin:\$PATH
COPY --chown=1000:0 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh

View File

@ -64,6 +64,9 @@ final class JvmErgonomics {
ergonomicChoices.add("-Dio.netty.allocator.type=pooled");
}
}
if (systemProperties.containsKey("io.netty.allocator.numDirectArenas") == false) {
ergonomicChoices.add("-Dio.netty.allocator.numDirectArenas=0");
}
final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions);
if (maxDirectMemorySize == 0) {
ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2);

View File

@ -48,9 +48,8 @@ The returned +{response}+ contains the requested {dataframe-transform} statistic
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The response contains a list of `DataFrameTransformStats` objects
<2> The running state of the transform task e.g `started`
<3> The running state of the transform indexer e.g `started`, `indexing`, etc.
<4> The overall transform statistics recording the number of documents indexed etc.
<5> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint
<2> The running state of the transform, for example `started`, `indexing`, etc.
<3> The overall transform statistics recording the number of documents indexed etc.
<4> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint
and the total number of docs expected.
<6> The assigned node information if the task is currently assigned to a node and running.
<5> The assigned node information if the task is currently assigned to a node and running.

View File

@ -1,7 +1,35 @@
[[cat-plugins]]
=== cat plugins
The `plugins` command provides a view per node of running plugins. This information *spans nodes*.
Returns a list of plugins running on each node of a cluster.
[[cat-plugins-api-request]]
==== {api-request-title}
`GET /_cat/plugins`
[[cat-plugins-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-plugins-api-example]]
==== {api-examples-title}
[source,js]
------------------------------------------------------------------------------
@ -9,7 +37,7 @@ GET /_cat/plugins?v&s=component&h=name,component,version,description
------------------------------------------------------------------------------
// CONSOLE
Might look like:
The API returns the following response:
["source","txt",subs="attributes,callouts"]
------------------------------------------------------------------------------
@ -31,6 +59,4 @@ U7321H6 mapper-size {version_qualified} The Mapper Size plugin allow
U7321H6 store-smb {version_qualified} The Store SMB plugin adds support for SMB stores.
U7321H6 transport-nio {version_qualified} The nio transport.
------------------------------------------------------------------------------
// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ non_json]
We can tell quickly how many plugins per node we have and which versions.
// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ non_json]

View File

@ -1,16 +1,61 @@
[[cat-recovery]]
=== cat recovery
The `recovery` command is a view of index shard recoveries, both on-going and previously
completed. It is a more compact view of the JSON <<indices-recovery,recovery>> API.
Returns information about ongoing and completed index shard recoveries, similar
to the <<indices-recovery, indices recovery>> API.
A recovery event occurs anytime an index shard moves to a different node in the cluster.
This can happen during a snapshot recovery, a change in replication level, node failure, or
on node startup. This last type is called a local store recovery and is the normal
way for shards to be loaded from disk when a node starts up.
As an example, here is what the recovery state of a cluster may look like when there
are no shards in transit from one node to another:
[[cat-recovery-api-request]]
==== {api-request-title}
`GET /_cat/recovery/{index}`
[[cat-recovery-api-desc]]
==== {api-description-title}
The cat recovery API returns information about index shard recoveries, both
ongoing and completed. It is a more compact view of the JSON
<<indices-recovery,indices recovery>> API.
A recovery event occurs anytime an index shard moves to a different node in the
cluster. This can happen during a snapshot recovery, a change in replication
level, node failure, or on node startup. This last type is called a local store
recovery and is the normal way for shards to be loaded from disk when a node
starts up.
[[cat-recovery-path-params]]
==== {api-path-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
[[cat-recovery-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-recovery-api-example]]
==== {api-examples-title}
[[cat-recovery-api-ex-dead]]
===== Example with no ongoing recoveries
[source,js]
----------------------------------------------------------------------------
@ -19,7 +64,7 @@ GET _cat/recovery?v
// CONSOLE
// TEST[setup:twitter]
The response of this request will be something like:
The API returns the following response:
[source,txt]
---------------------------------------------------------------------------
@ -32,12 +77,15 @@ twitter 0 13ms store done n/a n/a 127.0.0.1 node-0 n
// TESTRESPONSE[s/13ms/[0-9.]+m?s/]
// TESTRESPONSE[s/13/\\d+/ non_json]
In the above case, the source and target nodes are the same because the recovery
type was store, i.e. they were read from local storage on node start.
In this example response, the source and target nodes are the same because the
recovery type is `store`, meaning they were read from local storage on node
start.
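The `{index}` path parameter described above limits the response to specific indices. As a small sketch against the same `twitter` test index used in the example above:
[source,js]
----------------------------------------------------------------------------
GET _cat/recovery/twitter?v
----------------------------------------------------------------------------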
Now let's see what a live recovery looks like. By increasing the replica count
of our index and bringing another node online to host the replicas, we can see
what a live shard recovery looks like.
[[cat-recovery-api-ex-live]]
===== Example with a live shard recovery
By increasing the replica count of an index and bringing another node online to
host the replicas, you can retrieve information about an ongoing recovery.
[source,js]
----------------------------------------------------------------------------
@ -46,7 +94,7 @@ GET _cat/recovery?v&h=i,s,t,ty,st,shost,thost,f,fp,b,bp
// CONSOLE
// TEST[setup:twitter]
This will return a line like:
The API returns the following response:
[source,txt]
----------------------------------------------------------------------------
@ -59,13 +107,16 @@ twitter 0 1252ms peer done 192.168.1.1 192.168.1.2 0 100.0% 0 100.0%
// TESTRESPONSE[s/100.0%/0.0%/]
// TESTRESPONSE[s/1252ms/[0-9.]+m?s/ non_json]
We can see in the above listing that our thw twitter shard was recovered from another node.
Notice that the recovery type is shown as `peer`. The files and bytes copied are
real-time measurements.
In this example response, the recovery type is `peer`, meaning the shard
recovered from another node. The returned files and bytes are real-time
measurements.
Finally, let's see what a snapshot recovery looks like. Assuming I have previously
made a backup of my index, I can restore it using the <<modules-snapshots,snapshot and restore>>
API.
[[cat-recovery-api-ex-snapshot]]
===== Example with a snapshot recovery
You can restore backups of an index using the <<modules-snapshots,snapshot and
restore>> API. You can use the cat recovery API to retrieve information about a
snapshot recovery.
[source,js]
--------------------------------------------------------------------------------
@ -74,11 +125,11 @@ GET _cat/recovery?v&h=i,s,t,ty,st,rep,snap,f,fp,b,bp
// CONSOLE
// TEST[skip:no need to execute snapshot/restore here]
This will show a recovery of type snapshot in the response
The API returns the following response with a recovery type of `snapshot`:
[source,txt]
--------------------------------------------------------------------------------
i s t ty st rep snap f fp b bp
twitter 0 1978ms snapshot done twitter snap_1 79 8.0% 12086 9.0%
--------------------------------------------------------------------------------
// TESTRESPONSE[non_json]
// TESTRESPONSE[non_json]

View File

@ -1,8 +1,35 @@
[[cat-repositories]]
=== cat repositories
The `repositories` command shows the snapshot repositories registered in the
cluster. For example:
Returns the <<snapshots-repositories,snapshot repositories>> for a cluster.
[[cat-repositories-api-request]]
==== {api-request-title}
`GET /_cat/repositories`
[[cat-repositories-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-repositories-api-example]]
==== {api-examples-title}
[source,js]
--------------------------------------------------
@ -11,7 +38,7 @@ GET /_cat/repositories?v
// CONSOLE
// TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/]
might looks like:
The API returns the following response:
[source,txt]
--------------------------------------------------
@ -20,5 +47,3 @@ repo1 fs
repo2 s3
--------------------------------------------------
// TESTRESPONSE[s/\nrepo2 s3// non_json]
We can quickly see which repositories are registered and their type.

View File

@ -1,9 +1,109 @@
[[cat-segments]]
=== cat segments
The `segments` command provides low level information about the segments
in the shards of an index. It provides information similar to the
link:indices-segments.html[_segments] endpoint. For example:
Returns low-level information about the https://lucene.apache.org/core/[Lucene]
segments in index shards, similar to the <<indices-segments, indices segments>>
API.
[[cat-segments-api-request]]
==== {api-request-title}
`GET /_cat/segments/{index}`
[[cat-segments-path-params]]
==== {api-path-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
[[cat-segments-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
+
--
If you do not specify which columns to include, the API returns the default
columns in the order listed below. If you explicitly specify one or more
columns, it only returns the specified columns.
Valid columns are:
`index`, `i`, `idx`::
(Default) Name of the index, such as `twitter`.
`shard`, `s`, `sh`::
(Default) Name of the shard.
`prirep`, `p`, `pr`, `primaryOrReplica`::
(Default) Shard type. Returned values are `primary` or `replica`.
`ip`::
(Default) IP address of the segment's shard, such as `127.0.1.1`.
`segment`::
(Default) Name of the segment, such as `_0`. The segment name is derived from
the segment generation and used internally to create file names in the directory
of the shard.
`generation`::
(Default) Generation number, such as `0`. {es} increments this generation number
for each segment written. {es} then uses this number to derive the segment name.
`docs.count`::
(Default) Number of non-deleted documents in the segment, such as `25`. This
number is based on Lucene documents and may include documents from
<<nested,nested>> fields.
`docs.deleted`::
(Default) Number of deleted documents in the segment, such as `0`. This number
is based on Lucene documents. {es} reclaims the disk space of deleted Lucene
documents when a segment is merged.
`size`::
(Default) Disk space used by the segment, such as `50kb`.
`size.memory`::
(Default) Bytes of segment data stored in memory for efficient search, such as
`1264`.
`committed`::
(Default) If `true`, the segment is committed to disk. Segments committed to
disk would survive a hard reboot.
+
If `false`, the data from uncommitted segments is also stored in the transaction
log. {es} replays those changes on the next start.
`searchable`::
(Default) If `true`, the segment is searchable.
+
If `false`, the segment has likely been written to disk but has not been
<<docs-refresh,refreshed>>.
`version`::
(Default) Version of Lucene used to write the segment.
`compound`::
(Default) If `true`, the segment is stored in a compound file. This means Lucene
merged all files from the segment into a single file to save file descriptors.
`id`::
ID of the node, such as `k0zy`.
--
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-segments-api-example]]
==== {api-examples-title}
[source,js]
--------------------------------------------------
@ -12,7 +112,7 @@ GET /_cat/segments?v
// CONSOLE
// TEST[s/^/PUT \/test\/test\/1?refresh\n{"test":"test"}\nPUT \/test1\/test\/1?refresh\n{"test":"test"}\n/]
might look like:
The API returns the following response:
["source","txt",subs="attributes,callouts"]
--------------------------------------------------
@ -21,53 +121,3 @@ test 0 p 127.0.0.1 _0 0 1 0 3kb
test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
--------------------------------------------------
// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ non_json]
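To limit the response to specific indices, add one or more index names to the URL path; the `h` parameter selects columns from the list above. A small sketch, reusing the `test` index from the snippet setup:
[source,js]
--------------------------------------------------
GET /_cat/segments/test?v&h=index,shard,segment,docs.count,size
--------------------------------------------------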
The output shows information about index names and shard numbers in the first
two columns.
If you only want to get information about segments in one particular index,
you can add the index name in the URL, for example `/_cat/segments/test`. Also,
several indexes can be queried like `/_cat/segments/test,test1`
The following columns provide additional monitoring information:
prirep:: Whether this segment belongs to a primary or replica shard.
ip:: The ip address of the segment's shard.
segment:: A segment name, derived from the segment generation. The name
is internally used to generate the file names in the directory
of the shard this segment belongs to.
generation:: The generation number is incremented with each segment that is written.
The name of the segment is derived from this generation number.
docs.count:: The number of non-deleted documents that are stored in this segment.
Note that these are Lucene documents, so the count will include hidden
documents (e.g. from nested types).
docs.deleted:: The number of deleted documents that are stored in this segment.
It is perfectly fine if this number is greater than 0, space is
going to be reclaimed when this segment gets merged.
size:: The amount of disk space that this segment uses.
size.memory:: Segments store some data into memory in order to be searchable efficiently.
This column shows the number of bytes in memory that are used.
committed:: Whether the segment has been sync'ed on disk. Segments that are
committed would survive a hard reboot. No need to worry in case
of false, the data from uncommitted segments is also stored in
the transaction log so that Elasticsearch is able to replay
changes on the next start.
searchable:: True if the segment is searchable. A value of false would most
likely mean that the segment has been written to disk but no
refresh occurred since then to make it searchable.
version:: The version of Lucene that has been used to write this segment.
compound:: Whether the segment is stored in a compound file. When true, this
means that Lucene merged all files from the segment in a single
one in order to save file descriptors.

View File

@ -1,9 +1,110 @@
[[cat-snapshots]]
=== cat snapshots
The `snapshots` command shows all snapshots that belong to a specific repository.
To find a list of available repositories to query, the command `/_cat/repositories` can be used.
Querying the snapshots of a repository named `repo1` then looks as follows.
Returns information about the <<modules-snapshots,snapshots>> stored in one or
more repositories. A snapshot is a backup of an index or running {es} cluster.
[[cat-snapshots-api-request]]
==== {api-request-title}
`GET /_cat/snapshots/{repository}`
[[cat-snapshots-path-params]]
==== {api-path-parms-title}
`{repository}`::
+
--
(Optional, string) Comma-separated list of snapshot repositories used to limit
the request. Accepts wildcard expressions. `_all` returns all repositories.
If any repository fails during the request, {es} returns an error.
--
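As a sketch of the `_all` value mentioned above (it assumes at least one repository is registered), the following request lists snapshots across every repository:
[source,js]
--------------------------------------------------
GET /_cat/snapshots/_all?v
--------------------------------------------------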
[[cat-snapshots-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
+
--
If you do not specify which columns to include, the API returns the default
columns in the order listed below. If you explicitly specify one or more
columns, it only returns the specified columns.
Valid columns are:
`id`, `snapshot`::
(Default) ID of the snapshot, such as `snap1`.
`repository`, `re`, `repo`::
(Default) Name of the repository, such as `repo1`.
`status`, `s`::
(Default) State of the snapshot process. Returned values are:
+
* `FAILED`: The snapshot process failed.
* `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster
version.
* `IN_PROGRESS`: The snapshot process started but has not completed.
* `PARTIAL`: The snapshot process completed with a partial success.
* `SUCCESS`: The snapshot process completed with a full success.
`start_epoch`, `ste`, `startEpoch`::
(Default) https://en.wikipedia.org/wiki/Unix_time[Unix `epoch` time] at which
the snapshot process started.
`start_time`, `sti`, `startTime`::
(Default) `HH:MM:SS` time at which the snapshot process started.
`end_epoch`, `ete`, `endEpoch`::
(Default) https://en.wikipedia.org/wiki/Unix_time[Unix `epoch` time] at which
the snapshot process ended.
`end_time`, `eti`, `endTime`::
(Default) `HH:MM:SS` time at which the snapshot process ended.
`duration`, `dur`::
(Default) Time it took the snapshot process to complete in <<time-units,time
units>>.
`indices`, `i`::
(Default) Number of indices in the snapshot.
`successful_shards`, `ss`::
(Default) Number of successful shards in the snapshot.
`failed_shards`, `fs`::
(Default) Number of failed shards in the snapshot.
`total_shards`, `ts`::
(Default) Total number of shards in the snapshot.
`reason`, `r`::
Reason for any snapshot failures.
--
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
`ignore_unavailable`::
(Optional, boolean) If `true`, the response does not include information from
unavailable snapshots. Defaults to `false`.
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-snapshots-api-example]]
==== {api-examples-title}
[source,js]
--------------------------------------------------
@ -14,7 +115,7 @@ GET /_cat/snapshots/repo1?v&s=id
// TEST[s/^/PUT \/_snapshot\/repo1\/snap2?wait_for_completion=true\n/]
// TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/]
Which looks like:
The API returns the following response:
[source,txt]
--------------------------------------------------
@ -28,7 +129,3 @@ snap2 SUCCESS 1445634298 23:04:58 1445634672 23:11:12 6.2m 2
// TESTRESPONSE[s/2 10 0 10/\\d+ \\d+ \\d+ \\d+/]
// TESTRESPONSE[non_json]
Each snapshot contains information about when it was started and stopped.
Start and stop timestamps are available in two formats.
The `HH:MM:SS` output is simply for quick human consumption.
The epoch time retains more information, including date, and is machine sortable if the snapshot process spans days.

View File

@ -1,7 +1,45 @@
[[cat-templates]]
=== cat templates
The `templates` command provides information about existing templates.
Returns information about <<indices-templates,index templates>> in a cluster.
You can use index templates to apply <<index-modules-settings,index settings>>
and <<mapping,field mappings>> to new indices at creation.
[[cat-templates-api-request]]
==== {api-request-title}
`GET /_cat/templates/{template_name}`
[[cat-templates-path-params]]
==== {api-path-parms-title}
`{template_name}`::
(Optional, string) Comma-separated list of index template names used to limit
the request. Accepts wildcard expressions.
[[cat-templates-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-templates-api-example]]
==== {api-examples-title}
[source,js]
--------------------------------------------------
@ -19,7 +57,7 @@ GET /_cat/templates?v&s=name
// templates.
// 2. Create some templates to expect in the response.
which looks like
The API returns the following response:
[source,txt]
--------------------------------------------------
@ -29,10 +67,3 @@ template1 [tea*] 1
template2 [teak*] 2 7
--------------------------------------------------
// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ non_json]
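You can also add a template name or wildcard pattern to the URL path to filter the response. A sketch, assuming template names like those shown above:
[source,js]
--------------------------------------------------
GET /_cat/templates/template*?v&s=name
--------------------------------------------------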
The output shows that there are three existing templates,
with template2 having a version value.
The endpoint also supports giving a template name or pattern in the url
to filter the results, for example `/_cat/templates/template*` or
`/_cat/templates/template0`.

View File

@ -1,8 +1,119 @@
[[cat-thread-pool]]
=== cat thread pool
The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected
statistics are returned for all thread pools.
Returns thread pool statistics for each node in a cluster. Returned information
includes all <<modules-threadpool,built-in thread pools>> and custom thread
pools.
[[cat-thread-pool-api-request]]
==== {api-request-title}
`GET /_cat/thread_pool/{thread_pool}`
[[cat-thread-pool-path-params]]
==== {api-path-parms-title}
`{thread_pool}`::
(Optional, string) Comma-separated list of thread pool names used to limit the
request. Accepts wildcard expressions.
[[cat-thread-pool-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
+
--
If you do not specify which columns to include, the API returns the default
columns in the order listed below. If you explicitly specify one or more
columns, it only returns the specified columns.
Valid columns are:
`node_name`::
(Default) Node name, such as `I8hydUG`.
`name`::
(Default) Name of the thread pool, such as `analyze` or `generic`.
`active`, `a`::
(Default) Number of active threads in the current thread pool.
`queue`, `q`::
(Default) Number of tasks in the queue for the current thread pool.
`rejected`, `r`::
(Default) Number of tasks rejected by the thread pool executor.
`completed`, `c`::
Number of tasks completed by the thread pool executor.
`core`, `cr`::
Configured core number of active threads allowed in the current thread pool.
`ephemeral_id`, `eid`::
Ephemeral node ID.
`host`, `h`::
Hostname for the current node.
`ip`, `i`::
IP address for the current node.
`keep_alive`, `k`::
Configured keep alive time for threads.
`largest`, `l`::
Highest number of active threads in the current thread pool.
`max`, `mx`::
Configured maximum number of active threads allowed in the current thread pool.
`node_id`, `id`::
ID of the node, such as `k0zy`.
`pid`, `p`::
Process ID of the running node.
`pool_size`, `psz`::
Number of threads in the current thread pool.
`port`, `po`::
Bound transport port for the current node.
`queue_size`, `qs`::
Maximum number of tasks permitted in the queue for the current thread pool.
`size`, `sz`::
Configured fixed number of active threads allowed in the current thread pool.
`type`, `t`::
Type of thread pool. Returned values are `fixed` or `scaling`.
--
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
`size`::
(Optional, <<size-units, size unit>>) Multiplier used to display quantities.
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-thread-pool-api-example]]
==== {api-examples-title}
[[cat-thread-pool-api-ex-default]]
===== Example with default columns
[source,js]
--------------------------------------------------
@ -10,7 +121,7 @@ GET /_cat/thread_pool
--------------------------------------------------
// CONSOLE
Which looks like:
The API returns the following response:
[source,txt]
--------------------------------------------------
@ -33,66 +144,13 @@ node-0 write 0 0 0
// know how many there will be and we just want to assert that there are
// numbers in the response, not *which* numbers are there.
The first column is the node name
[source,txt]
--------------------------------------------------
node_name
node-0
--------------------------------------------------
[[cat-thread-pool-api-ex-headings]]
===== Example with explicit columns
The second column is the thread pool name
[source,txt]
--------------------------------------------------
name
analyze
ccr (default distro only)
fetch_shard_started
fetch_shard_store
flush
force_merge
generic
get
listener
management
ml_autodetect (default distro only)
ml_datafeed (default distro only)
ml_utility (default distro only)
refresh
rollup_indexing (default distro only)
search
security-token-key (default distro only)
snapshot
warmer
watcher (default distro only)
write
--------------------------------------------------
The next three columns show the active, queue, and rejected statistics for each thread pool
[source,txt]
--------------------------------------------------
active queue rejected
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
0 0 0
1 0 0
0 0 0
0 0 0
0 0 0
0 0 0
--------------------------------------------------
The cat thread pool API accepts a `thread_pool_patterns` URL parameter for specifying a
comma-separated list of regular expressions to match thread pool names.
The following API request returns the `id`, `name`, `active`, `rejected`, and
`completed` columns. The request limits returned information to the `generic`
thread pool.
[source,js]
--------------------------------------------------
@ -100,7 +158,7 @@ GET /_cat/thread_pool/generic?v&h=id,name,active,rejected,completed
--------------------------------------------------
// CONSOLE
which looks like:
The API returns the following response:
[source,txt]
--------------------------------------------------
@ -109,46 +167,3 @@ id name active rejected completed
--------------------------------------------------
// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ non_json]
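The `{thread_pool}` path parameter also accepts a comma-separated list or wildcard expression. A sketch that limits the response to two of the built-in pools listed above:
[source,js]
--------------------------------------------------
GET /_cat/thread_pool/write,search?v
--------------------------------------------------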
Here the host columns and the active, rejected and completed suggest thread pool statistics are displayed.
All <<modules-threadpool,built-in thread pools>> and custom thread pools are available.
[float]
===== Thread Pool Fields
For each thread pool, you can load details about it by using the field names
in the table below.
[cols="<,<,<",options="header"]
|=======================================================================
|Field Name |Alias |Description
|`type` |`t` |The current (*) type of thread pool (`fixed` or `scaling`)
|`active` |`a` |The number of active threads in the current thread pool
|`pool_size` |`psz` |The number of threads in the current thread pool
|`queue` |`q` |The number of tasks in the queue for the current thread pool
|`queue_size` |`qs` |The maximum number of tasks permitted in the queue for the current thread pool
|`rejected` |`r` |The number of tasks rejected by the thread pool executor
|`largest` |`l` |The highest number of active threads in the current thread pool
|`completed` |`c` |The number of tasks completed by the thread pool executor
|`core` |`cr` |The configured core number of active threads allowed in the current thread pool
|`max` |`mx` |The configured maximum number of active threads allowed in the current thread pool
|`size` |`sz` |The configured fixed number of active threads allowed in the current thread pool
|`keep_alive` |`k` |The configured keep alive time for threads
|=======================================================================
[float]
==== Other Fields
In addition to details about each thread pool, it is also convenient to get an
understanding of where those thread pools reside. As such, you can request
other details like the `ip` of the responding node(s).
[cols="<,<,<",options="header"]
|=======================================================================
|Field Name |Alias |Description
|`node_id` |`id` |The unique node ID
|`ephemeral_id`|`eid` |The ephemeral node ID
|`pid` |`p` |The process ID of the running node
|`host` |`h` |The hostname for the current node
|`ip` |`i` |The IP address for the current node
|`port` |`po` |The bound transport port for the current node
|=======================================================================

View File

@ -126,7 +126,7 @@ The API returns the following results:
"transforms" : [
{
"id" : "ecommerce_transform",
"task_state" : "started",
"state" : "indexing",
"stats" : {
"pages_processed" : 2,
"documents_processed" : 1220,
@ -147,7 +147,6 @@ The API returns the following results:
},
"next" : {
"checkpoint" : 101,
"indexer_state" : "started",
"position" : {
"indexer_position" : {
"hashtag" : "abcd1234"

View File

@ -20,6 +20,9 @@ name of the restored index as well as some of its settings. There is a great
deal of flexibility in how the snapshot and restore functionality can be used.
// end::restore-intro[]
You can automate your snapshot backup and restore process by using
<<getting-started-snapshot-lifecycle-management, snapshot lifecycle management>>.
// tag::backup-warning[]
WARNING: You cannot back up an Elasticsearch cluster by simply taking a copy of
the data directories of all of its nodes. Elasticsearch may be making changes to
@ -124,7 +127,7 @@ which returns:
-----------------------------------
// TESTRESPONSE
To retrieve information about multiple repositories, specify a comma-delimited
To retrieve information about multiple repositories, specify a comma-delimited
list of repositories. You can also use the * wildcard when
specifying repository names. For example, the following request retrieves
information about all of the snapshot repositories that start with `repo` or

View File

@ -4,17 +4,25 @@
<titleabbrev>Intervals</titleabbrev>
++++
An `intervals` query allows fine-grained control over the order and proximity of
matching terms. Matching rules are constructed from a small set of definitions,
and the rules are then applied to terms from a particular `field`.
Returns documents based on the order and proximity of matching terms.
The `intervals` query uses *matching rules*, constructed from a small set of
definitions. These rules are then applied to terms from a specified `field`.
The definitions produce sequences of minimal intervals that span terms in a
body of text. These intervals can be further combined and filtered by
body of text. These intervals can be further combined and filtered by
parent sources.
The example below will search for the phrase `my favourite food` appearing
before the terms `hot` and `water` or `cold` and `porridge` in any order, in
the field `my_text`
[[intervals-query-ex-request]]
==== Example request
The following `intervals` search returns documents containing `my
favorite food` immediately followed by `hot water` or `cold porridge` in the
`my_text` field.
This search would match a `my_text` value of `my favorite food is cold
porridge` but not `when it's cold my favorite food is porridge`.
[source,js]
--------------------------------------------------
@ -28,7 +36,7 @@ POST _search
"intervals" : [
{
"match" : {
"query" : "my favourite food",
"query" : "my favorite food",
"max_gaps" : 0,
"ordered" : true
}
@ -42,8 +50,7 @@ POST _search
}
}
]
},
"_name" : "favourite_food"
}
}
}
}
@ -51,69 +58,103 @@ POST _search
--------------------------------------------------
// CONSOLE
In the above example, the text `my favourite food is cold porridge` would
match because the two intervals matching `my favourite food` and `cold
porridge` appear in the correct order, but the text `when it's cold my
favourite food is porridge` would not match, because the interval matching
`cold porridge` starts before the interval matching `my favourite food`.
[[intervals-top-level-params]]
==== Top-level parameters for `intervals`
[[intervals-rules]]
`<field>`::
+
--
(Required, rule object) Field you wish to search.
The value of this parameter is a rule object used to match documents
based on matching terms, order, and proximity.
Valid rules include:
* <<intervals-match,`match`>>
* <<intervals-prefix,`prefix`>>
* <<intervals-wildcard,`wildcard`>>
* <<intervals-all_of,`all_of`>>
* <<intervals-any_of,`any_of`>>
* <<interval_filter,`filter`>>
--
[[intervals-match]]
==== `match`
==== `match` rule parameters
The `match` rule matches analyzed text, and takes the following parameters:
The `match` rule matches analyzed text.
[horizontal]
`query`::
The text to match.
(Required, string) Text you wish to find in the provided `<field>`.
`max_gaps`::
Specify a maximum number of gaps between the terms in the text. Terms that
appear further apart than this will not match. If unspecified, or set to -1,
then there is no width restriction on the match. If set to 0 then the terms
must appear next to each other.
+
--
(Optional, integer) Maximum number of positions between the matching terms.
Terms further apart than this are not considered matches. Defaults to
`-1`.
If unspecified or set to `-1`, there is no width restriction on the match. If
set to `0`, the terms must appear next to each other.
--
`ordered`::
Whether or not the terms must appear in their specified order. Defaults to
`false`
(Optional, boolean)
If `true`, matching terms must appear in their specified order. Defaults to
`false`.
`analyzer`::
Which analyzer should be used to analyze terms in the `query`. By
default, the search analyzer of the top-level field will be used.
(Optional, string) <<analysis, analyzer>> used to analyze terms in the `query`.
Defaults to the top-level `<field>`'s analyzer.
`filter`::
An optional <<interval_filter,interval filter>>
(Optional, <<interval_filter,interval filter>> rule object) An optional interval
filter.
`use_field`::
If specified, then match intervals from this field rather than the top-level field.
Terms will be analyzed using the search analyzer from this field. This allows you
to search across multiple fields as if they were all the same field; for example,
you could index the same text into stemmed and unstemmed fields, and search for
stemmed tokens near unstemmed ones.
(Optional, string) If specified, then match intervals from this
field rather than the top-level `<field>`. Terms are analyzed using the
search analyzer from this field. This allows you to search across multiple
fields as if they were all the same field; for example, you could index the same
text into stemmed and unstemmed fields, and search for stemmed tokens near
unstemmed ones.
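As an illustrative sketch of `use_field` (the `my_text.stemmed` subfield is hypothetical and assumes a stemmed multi-field mapping), the following rule matches intervals from the stemmed subfield instead of the top-level `my_text` field:
[source,js]
--------------------------------------------------
POST _search
{
  "query": {
    "intervals" : {
      "my_text" : {
        "match" : {
          "query" : "hot porridge",
          "max_gaps" : 10,
          "use_field" : "my_text.stemmed"
        }
      }
    }
  }
}
--------------------------------------------------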
[[intervals-prefix]]
==== `prefix`
==== `prefix` rule parameters
The `prefix` rule finds terms that start with a specified prefix. The prefix will
expand to match at most 128 terms; if there are more matching terms in the index,
then an error will be returned. To avoid this limit, enable the
<<index-prefixes,`index-prefixes`>> option on the field being searched.
The `prefix` rule matches terms that start with a specified set of characters.
This prefix can expand to match at most 128 terms. If the prefix matches more
than 128 terms, {es} returns an error. You can use the
<<index-prefixes,`index-prefixes`>> option in the field mapping to avoid this
limit.
[horizontal]
`prefix`::
Match terms starting with this prefix
(Required, string) Beginning characters of terms you wish to find in the
top-level `<field>`.
`analyzer`::
Which analyzer should be used to normalize the `prefix`. By default, the
search analyzer of the top-level field will be used.
(Optional, string) <<analysis, analyzer>> used to normalize the `prefix`.
Defaults to the top-level `<field>`'s analyzer.
`use_field`::
If specified, then match intervals from this field rather than the top-level field.
The `prefix` will be normalized using the search analyzer from this field, unless
`analyzer` is specified separately.
+
--
(Optional, string) If specified, then match intervals from this field rather
than the top-level `<field>`.
The `prefix` is normalized using the search analyzer from this field, unless a
separate `analyzer` is specified.
--
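A minimal sketch of a `prefix` rule, reusing the `my_text` field from the earlier example:
[source,js]
--------------------------------------------------
POST _search
{
  "query": {
    "intervals" : {
      "my_text" : {
        "prefix" : {
          "prefix" : "hot"
        }
      }
    }
  }
}
--------------------------------------------------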
[[intervals-wildcard]]
==== `wildcard`
==== `wildcard` rule parameters
The `wildcard` rule finds terms that match a wildcard pattern. The pattern will
expand to match at most 128 terms; if there are more matching terms in the index,
then an error will be returned.
The `wildcard` rule matches terms using a wildcard pattern. This pattern can
expand to match at most 128 terms. If the pattern matches more than 128 terms,
{es} returns an error.
[horizontal]
`pattern`::
Find terms matching this pattern
(Required, string) Wildcard pattern used to find matching terms.
+
--
This parameter supports two wildcard operators:
@ -125,51 +166,112 @@ WARNING: Avoid beginning patterns with `*` or `?`. This can increase
the iterations needed to find matching terms and slow search performance.
--
`analyzer`::
Which analyzer should be used to normalize the `pattern`. By default, the
search analyzer of the top-level field will be used.
(Optional, string) <<analysis, analyzer>> used to normalize the `pattern`.
Defaults to the top-level `<field>`'s analyzer.
`use_field`::
If specified, then match intervals from this field rather than the top-level field.
The `pattern` will be normalized using the search analyzer from this field, unless
+
--
(Optional, string) If specified, match intervals from this field rather than the
top-level `<field>`.
The `pattern` is normalized using the search analyzer from this field, unless
`analyzer` is specified separately.
--
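A minimal sketch of a `wildcard` rule, again assuming the `my_text` field from the earlier example:
[source,js]
--------------------------------------------------
POST _search
{
  "query": {
    "intervals" : {
      "my_text" : {
        "wildcard" : {
          "pattern" : "ho*"
        }
      }
    }
  }
}
--------------------------------------------------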
[[intervals-all_of]]
==== `all_of`
==== `all_of` rule parameters
`all_of` returns returns matches that span a combination of other rules.
The `all_of` rule returns matches that span a combination of other rules.
[horizontal]
`intervals`::
An array of rules to combine. All rules must produce a match in a
document for the overall source to match.
(Required, array of rule objects) An array of rules to combine. All rules must
produce a match in a document for the overall source to match.
`max_gaps`::
Specify a maximum number of gaps between the rules. Combinations that match
across a distance greater than this will not match. If set to -1 or
unspecified, there is no restriction on this distance. If set to 0, then the
matches produced by the rules must all appear immediately next to each other.
+
--
(Optional, integer) Maximum number of positions between the matching terms.
Intervals produced by the rules further apart than this are not considered
matches. Defaults to `-1`.
If unspecified or set to `-1`, there is no width restriction on the match. If
set to `0`, the terms must appear next to each other.
--
`ordered`::
Whether the intervals produced by the rules should appear in the order in
which they are specified. Defaults to `false`
(Optional, boolean) If `true`, intervals produced by the rules should appear in
the order in which they are specified. Defaults to `false`.
`filter`::
An optional <<interval_filter,interval filter>>
(Optional, <<interval_filter,interval filter>> rule object) Rule used to filter
returned intervals.
[[intervals-any_of]]
==== `any_of`
==== `any_of` rule parameters
The `any_of` rule emits intervals produced by any of its sub-rules.
The `any_of` rule returns intervals produced by any of its sub-rules.
[horizontal]
`intervals`::
An array of rules to match
(Required, array of rule objects) An array of rules to match.
`filter`::
An optional <<interval_filter,interval filter>>
(Optional, <<interval_filter,interval filter>> rule object) Rule used to filter
returned intervals.
[[interval_filter]]
==== filters
==== `filter` rule parameters
You can filter intervals produced by any rules by their relation to the
intervals produced by another rule. The following example will return
documents that have the words `hot` and `porridge` within 10 positions
of each other, without the word `salty` in between:
The `filter` rule returns intervals based on a query. See
<<interval-filter-rule-ex>> for an example.
`after`::
(Optional, query object) Query used to return intervals that follow an interval
from the `filter` rule.
`before`::
(Optional, query object) Query used to return intervals that occur before an
interval from the `filter` rule.
`contained_by`::
(Optional, query object) Query used to return intervals contained by an interval
from the `filter` rule.
`containing`::
(Optional, query object) Query used to return intervals that contain an interval
from the `filter` rule.
`not_contained_by`::
(Optional, query object) Query used to return intervals that are *not*
contained by an interval from the `filter` rule.
`not_containing`::
(Optional, query object) Query used to return intervals that do *not* contain
an interval from the `filter` rule.
`not_overlapping`::
(Optional, query object) Query used to return intervals that do *not* overlap
with an interval from the `filter` rule.
`overlapping`::
(Optional, query object) Query used to return intervals that overlap with an
interval from the `filter` rule.
`script`::
(Optional, <<modules-scripting-using, script object>>) Script used to return
matching documents. This script must return a boolean value, `true` or `false`.
See <<interval-script-filter>> for an example.
[[intervals-query-note]]
==== Notes
[[interval-filter-rule-ex]]
===== Filter example
The following search includes a `filter` rule. It returns documents that have
the words `hot` and `porridge` within 10 positions of each other, without the
word `salty` in between:
[source,js]
--------------------------------------------------
@ -196,31 +298,12 @@ POST _search
--------------------------------------------------
// CONSOLE
The following filters are available:
[horizontal]
`containing`::
Produces intervals that contain an interval from the filter rule
`contained_by`::
Produces intervals that are contained by an interval from the filter rule
`not_containing`::
Produces intervals that do not contain an interval from the filter rule
`not_contained_by`::
Produces intervals that are not contained by an interval from the filter rule
`overlapping`::
Produces intervals that overlap with an interval from the filter rule
`not_overlapping`::
Produces intervals that do not overlap with an interval from the filter rule
`before`::
Produces intervals that appear before an interval from the filter role
`after`::
Produces intervals that appear after an interval from the filter role
[[interval-script-filter]]
==== Script filters
===== Script filters
You can also filter intervals based on their start position, end position and
internal gap count, using a script. The script has access to an `interval`
variable, with `start`, `end` and `gaps` methods:
You can use a script to filter intervals based on their start position, end
position, and internal gap count. The following `filter` script uses the
`interval` variable with the `start`, `end`, and `gaps` methods:
[source,js]
--------------------------------------------------
@ -244,12 +327,13 @@ POST _search
--------------------------------------------------
// CONSOLE
[[interval-minimization]]
==== Minimization
===== Minimization
The intervals query always minimizes intervals, to ensure that queries can
run in linear time. This can sometimes cause surprising results, particularly
when using `max_gaps` restrictions or filters. For example, take the
run in linear time. This can sometimes cause surprising results, particularly
when using `max_gaps` restrictions or filters. For example, take the
following query, searching for `salty` contained within the phrase `hot
porridge`:
@ -277,15 +361,15 @@ POST _search
--------------------------------------------------
// CONSOLE
This query will *not* match a document containing the phrase `hot porridge is
This query does *not* match a document containing the phrase `hot porridge is
salty porridge`, because the intervals returned by the match query for `hot
porridge` only cover the initial two terms in this document, and these do not
overlap the intervals covering `salty`.
Another restriction to be aware of is the case of `any_of` rules that contain
sub-rules which overlap. In particular, if one of the rules is a strict
prefix of the other, then the longer rule will never be matched, which can
cause surprises when used in combination with `max_gaps`. Consider the
sub-rules which overlap. In particular, if one of the rules is a strict
prefix of the other, then the longer rule can never match, which can
cause surprises when used in combination with `max_gaps`. Consider the
following query, searching for `the` immediately followed by `big` or `big bad`,
immediately followed by `wolf`:
@ -316,10 +400,10 @@ POST _search
--------------------------------------------------
// CONSOLE
Counter-intuitively, this query *will not* match the document `the big bad
wolf`, because the `any_of` rule in the middle will only produce intervals
Counter-intuitively, this query does *not* match the document `the big bad
wolf`, because the `any_of` rule in the middle only produces intervals
for `big` - intervals for `big bad` being longer than those for `big`, while
starting at the same position, and so being minimized away. In these cases,
starting at the same position, and so being minimized away. In these cases,
it's better to rewrite the query so that all of the options are explicitly
laid out at the top level:

View File

@ -185,15 +185,3 @@ The example above creates a boolean query:
that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.
.Comparison to query_string / field
**************************************************
The match family of queries does not go through a "query parsing"
process. It does not support field name prefixes, wildcard characters,
or other "advanced" features. For this reason, chances of it failing are
very small / non existent, and it provides an excellent behavior when it
comes to just analyze and run that text as a query behavior (which is
usually what a text search box does).
**************************************************

View File

@ -4,8 +4,39 @@
<titleabbrev>Query string</titleabbrev>
++++
A query that uses a query parser in order to parse its content. Here is
an example:
Returns documents based on a provided query string, using a parser with a strict
syntax.
This query uses a <<query-string-syntax,syntax>> to parse and split the provided
query string based on operators, such as `AND` or `NOT`. The query
then <<analysis,analyzes>> each split text independently before returning
matching documents.
You can use the `query_string` query to create a complex search that includes
wildcard characters, searches across multiple fields, and more. While versatile,
the query is strict and returns an error if the query string includes any
invalid syntax.
[WARNING]
====
Because it returns an error for any invalid syntax, we don't recommend using
the `query_string` query for search boxes.
If you don't need to support a query syntax, consider using the
<<query-dsl-match-query, `match`>> query. If you need the features of a query
syntax, use the <<query-dsl-simple-query-string-query,`simple_query_string`>>
query, which is less strict.
====
[[query-string-query-ex-request]]
==== Example request
When running the following search, the `query_string` query splits `(new york
city) OR (big apple)` into two parts: `new york city` and `big apple`. The
`content` field's analyzer then independently converts each part into tokens
before returning matching documents. Because the query syntax does not use
whitespace as an operator, `new york city` is passed as-is to the analyzer.
[source,js]
--------------------------------------------------
@ -13,154 +44,211 @@ GET /_search
{
"query": {
"query_string" : {
"default_field" : "content",
"query" : "this AND that OR thus"
"query" : "(new york city) OR (big apple)",
"default_field" : "content"
}
}
}
--------------------------------------------------
// CONSOLE
The `query_string` query parses the input and splits text around operators.
Each textual part is analyzed independently of each other. For instance the following query:
[[query-string-top-level-params]]
==== Top-level parameters for `query_string`
`query`::
(Required, string) Query string you wish to parse and use for search. See
<<query-string-syntax>>.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"default_field" : "content",
"query" : "(new york city) OR (big apple)" <1>
}
}
}
--------------------------------------------------
// CONSOLE
`default_field`::
+
--
(Optional, string) Default field you wish to search if no field is provided in
the query string.
<1> will be split into `new york city` and `big apple` and each part is then
analyzed independently by the analyzer configured for the field.
Defaults to the `index.query.default_field` index setting, which has a default
value of `*`. The `*` value extracts all fields that are eligible for term
queries and filters out the metadata fields. All extracted fields are then
combined to build a query if no `prefix` is specified.
WARNING: Whitespaces are not considered operators, this means that `new york city`
will be passed "as is" to the analyzer configured for the field. If the field is a `keyword`
field the analyzer will create a single term `new york city` and the query builder will
use this term in the query. If you want to query each term separately you need to add explicit
operators around the terms (e.g. `new AND york AND city`).
WARNING: There is a limit on the number of fields that can be queried at once.
It is defined by the `indices.query.bool.max_clause_count`
<<search-settings,search setting>>, which defaults to 1024.
--
When multiple fields are provided it is also possible to modify how the different
field queries are combined inside each textual part using the `type` parameter.
The possible modes are described <<multi-match-types, here>> and the default is `best_fields`.
`allow_leading_wildcard`::
(Optional, boolean) If `true`, the wildcard characters `*` and `?` are allowed
as the first character of the query string. Defaults to `true`.
The `query_string` top level parameters include:
`analyze_wildcard`::
(Optional, boolean) If `true`, the query attempts to analyze wildcard terms in
the query string. Defaults to `false`.
[cols="<,<",options="header",]
|=======================================================================
|Parameter |Description
|`query` |The actual query to be parsed. See <<query-string-syntax>>.
`analyzer`::
(Optional, string) <<analysis,Analyzer>> used to convert text in the
query string into tokens. Defaults to the
<<specify-index-time-analyzer,index-time analyzer>> mapped for the
`default_field`. If no analyzer is mapped, the index's default analyzer is used.
|`default_field` |The default field for query terms if no prefix field is
specified. Defaults to the `index.query.default_field` index settings, which in
turn defaults to `*`. `*` extracts all fields in the mapping that are eligible
to term queries and filters the metadata fields. All extracted fields are then
combined to build a query when no prefix field is provided.
`auto_generate_synonyms_phrase_query`::
(Optional, boolean) If `true`, <<query-dsl-match-query-phrase,match phrase>>
queries are automatically created for multi-term synonyms. Defaults to `true`.
See <<query-string-synonyms>> for an example.
WARNING: There is a limit on the number of fields that can be queried
at once. It is defined by the `indices.query.bool.max_clause_count` <<search-settings>>
which defaults to 1024.
`boost`::
+
--
(Optional, float) Floating point number used to decrease or increase the
<<relevance-scores,relevance scores>> of the query. Defaults to `1.0`.
|`default_operator` |The default operator used if no explicit operator
is specified. For example, with a default operator of `OR`, the query
`capital of Hungary` is translated to `capital OR of OR Hungary`, and
with default operator of `AND`, the same query is translated to
`capital AND of AND Hungary`. The default value is `OR`.
Boost values are relative to the default value of `1.0`. A boost value between
`0` and `1.0` decreases the relevance score. A value greater than `1.0`
increases the relevance score.
--
|`analyzer` |The analyzer name used to analyze the query string.
`default_operator`::
+
--
(Optional, string) Default boolean logic used to interpret text in the query
string if no operators are specified. Valid values are:
|`quote_analyzer` |The name of the analyzer that is used to analyze
quoted phrases in the query string. For those parts, it overrides other
analyzers that are set using the `analyzer` parameter or the
<<search-quote-analyzer,`search_quote_analyzer`>> setting.
`OR` (Default)::
For example, a query string of `capital of Hungary` is interpreted as `capital
OR of OR Hungary`.
|`allow_leading_wildcard` |When set, `*` or `?` are allowed as the first
character. Defaults to `true`.
`AND`::
For example, a query string of `capital of Hungary` is interpreted as `capital
AND of AND Hungary`.
--
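For illustration, a minimal sketch (the `content` field and query text are placeholders) that applies `AND` semantics to an unquoted query string:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string" : {
      "query" : "capital of Hungary",
      "default_field" : "content",
      "default_operator" : "AND"
    }
  }
}
--------------------------------------------------
// CONSOLE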
|`enable_position_increments` |Set to `true` to enable position
increments in result queries. Defaults to `true`.
`enable_position_increments`::
(Optional, boolean) If `true`, enable position increments in queries constructed
from a `query_string` search. Defaults to `true`.
|`fuzzy_max_expansions` |Controls the number of terms fuzzy queries will
expand to. Defaults to `50`
`fields`::
+
--
(Optional, array of strings) Array of fields you wish to search.
|`fuzziness` |Set the fuzziness for fuzzy queries. Defaults
to `AUTO`. See <<fuzziness>> for allowed settings.
You can use this parameter to search across multiple fields. See
<<query-string-multi-field>>.
--
|`fuzzy_prefix_length` |Set the prefix length for fuzzy queries. Default
is `0`.
`fuzziness`::
(Optional, string) Maximum edit distance allowed for matching. See <<fuzziness>>
for valid values and more information.
|`fuzzy_transpositions` |Set to `false` to disable fuzzy transpositions (`ab` -> `ba`).
Default is `true`.
`fuzzy_max_expansions`::
(Optional, integer) Maximum number of terms to which the query expands for fuzzy
matching. Defaults to `50`.
|`phrase_slop` |Sets the default slop for phrases. If zero, then exact
phrase matches are required. Default value is `0`.
`fuzzy_prefix_length`::
(Optional, integer) Number of beginning characters left unchanged for fuzzy
matching. Defaults to `0`.
|`boost` |Sets the boost value of the query. Defaults to `1.0`.
`fuzzy_transpositions`::
(Optional, boolean) If `true`, edits for fuzzy matching include
transpositions of two adjacent characters (ab → ba). Defaults to `true`.
|`analyze_wildcard` |By default, wildcards terms in a query string are
not analyzed. By setting this value to `true`, a best effort will be
made to analyze those as well.
`lenient`::
(Optional, boolean) If `true`, format-based errors, such as providing a text
value for a <<number,numeric>> field, are ignored. Defaults to `false`.
|`max_determinized_states` |Limit on how many automaton states regexp
queries are allowed to create. This protects against too-difficult
(e.g. exponentially hard) regexps. Defaults to 10000.
`max_determinized_states`::
+
--
(Optional, integer) Maximum number of
https://en.wikipedia.org/wiki/Deterministic_finite_automaton[automaton states]
required for the query. Default is `10000`.
|`minimum_should_match` |A value controlling how many "should" clauses
in the resulting boolean query should match. It can be an absolute value
(`2`), a percentage (`30%`) or a
<<query-dsl-minimum-should-match,combination of
both>>.
{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to parse
regular expressions. Lucene converts each regular expression to a finite
automaton containing a number of determinized states.
|`lenient` |If set to `true` will cause format based failures (like
providing text to a numeric field) to be ignored.
You can use this parameter to prevent that conversion from unintentionally
consuming too many resources. You may need to increase this limit to run complex
regular expressions.
--
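As a sketch (the field name and the raised limit are illustrative only), a regular expression query that needs more automaton states might look like:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string" : {
      "query" : "content:/joh?n(ath[oa]n)?/",
      "max_determinized_states" : 20000
    }
  }
}
--------------------------------------------------
// CONSOLE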
|`time_zone` | Time Zone to be applied to any range query related to dates.
`minimum_should_match`::
(Optional, string) Minimum number of clauses that must match for a document to
be returned. See the <<query-dsl-minimum-should-match, `minimum_should_match`
parameter>> for valid values and more information. See
<<query-string-min-should-match>> for an example.
|`quote_field_suffix` | A suffix to append to fields for quoted parts of
the query string. This allows to use a field that has a different analysis chain
for exact matching. Look <<mixing-exact-search-with-stemming,here>> for a
comprehensive example.
`quote_analyzer`::
+
--
(Optional, string) <<analysis,Analyzer>> used to convert quoted text in the
query string into tokens. Defaults to the
<<search-quote-analyzer,`search_quote_analyzer`>> mapped for the
`default_field`.
|`auto_generate_synonyms_phrase_query` |Whether phrase queries should be automatically generated for multi terms synonyms.
Defaults to `true`.
For quoted text, this parameter overrides the analyzer specified in the
`analyzer` parameter.
--
|=======================================================================
`phrase_slop`::
(Optional, integer) Maximum number of positions allowed between matching tokens
for phrases. Defaults to `0`. If `0`, exact phrase matches are required.
Transposed terms have a slop of `2`.
When a multi term query is being generated, one can control how it gets
rewritten using the
<<query-dsl-multi-term-rewrite,rewrite>>
parameter.
`quote_field_suffix`::
+
--
(Optional, string) Suffix appended to quoted text in the query string.
[float]
==== Default Field
You can use this suffix to use a different analysis method for exact matches.
See <<mixing-exact-search-with-stemming>>.
--
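A minimal sketch, assuming the `body` field has an unstemmed subfield reachable as `body.exact` (both names are assumptions), so the quoted phrase is matched exactly while the unquoted term is still stemmed:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string" : {
      "query" : "\"ski boots\" skiing",
      "default_field" : "body",
      "quote_field_suffix" : ".exact"
    }
  }
}
--------------------------------------------------
// CONSOLE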
When not explicitly specifying the field to search on in the query
string syntax, the `index.query.default_field` will be used to derive
which field to search on. If the `index.query.default_field` is not specified,
the `query_string` will automatically attempt to determine the existing fields in the index's
mapping that are queryable, and perform the search on those fields.
This will not include nested documents, use a nested query to search those documents.
`rewrite`::
(Optional, string) Method used to rewrite the query. For valid values and more
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.
NOTE: For mappings with a large number of fields, searching across all queryable
fields in the mapping could be expensive.
`time_zone`::
+
--
(Optional, string)
https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal
Time (UTC) offset] or
https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[IANA time zone]
used to convert `date` values in the query string to UTC.
[float]
==== Multi Field
Valid values are ISO 8601 UTC offsets, such as `+01:00` or `-08:00`, and IANA
time zone IDs, such as `America/Los_Angeles`.
The `query_string` query can also run against multiple fields. Fields can be
provided via the `fields` parameter (example below).
[NOTE]
====
The `time_zone` parameter does **not** affect the <<date-math,date math>> value
of `now`. `now` is always the current system time in UTC. However, the
`time_zone` parameter does convert dates calculated using `now` and
<<date-math,date math rounding>>. For example, the `time_zone` parameter will
convert a value of `now/d`.
====
--
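For example, a sketch (the `timestamp` field is an assumption) of a date range in the query string interpreted in the `America/Los_Angeles` time zone rather than UTC:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string" : {
      "query" : "timestamp:[2019-01-01 TO 2019-12-31]",
      "time_zone" : "America/Los_Angeles"
    }
  }
}
--------------------------------------------------
// CONSOLE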
[[query-string-query-notes]]
==== Notes
include::query-string-syntax.asciidoc[]
[[query-string-nested]]
====== Avoid using the `query_string` query for nested documents
`query_string` searches do not return <<nested,nested>> documents. To search
nested documents, use the <<query-dsl-nested-query, `nested` query>>.
[[query-string-multi-field]]
====== Search multiple fields
You can use the `fields` parameter to perform a `query_string` search across
multiple fields.
The idea of running the `query_string` query against multiple fields is to
expand each query term to an OR clause like this:
field1:query_term OR field2:query_term | ...
```
field1:query_term OR field2:query_term | ...
```
For example, the following query
@ -252,21 +340,6 @@ GET /_search
NOTE: Since `\` (backslash) is a special character in json strings, it needs to
be escaped, hence the two backslashes in the above `query_string`.
When running the `query_string` query against multiple fields, the
following additional parameters are allowed:
[cols="<,<",options="header",]
|=======================================================================
|Parameter |Description
|`type` |How the fields should be combined to build the text query.
See <<multi-match-types, types>> for a complete example.
Defaults to `best_fields`
|`tie_breaker` |The disjunction max tie breaker for multi fields.
Defaults to `0`
|=======================================================================
The `fields` parameter can also include pattern-based field names, which
automatically expand to the relevant fields (dynamically introduced fields
included). For example:
@ -285,8 +358,50 @@ GET /_search
--------------------------------------------------
// CONSOLE
[float]
==== Synonyms
[[query-string-multi-field-parms]]
====== Additional parameters for multiple field searches
When running the `query_string` query against multiple fields, the
following additional parameters are supported.
`type`::
+
--
(Optional, string) Determines how the query matches and scores documents. Valid
values are:
`best_fields` (Default)::
Finds documents which match any field and uses the highest
<<relevance-scores,`_score`>> from any matching field. See
<<type-best-fields>>.
`bool_prefix`::
Creates a `match_bool_prefix` query on each field and combines the `_score` from
each field. See <<type-bool-prefix>>.
`cross_fields`::
Treats fields with the same `analyzer` as though they were one big field. Looks
for each word in **any** field. See <<type-cross-fields>>.
`most_fields`::
Finds documents which match any field and combines the `_score` from each field.
See <<type-most-fields>>.
`phrase`::
Runs a `match_phrase` query on each field and uses the `_score` from the best
field. See <<type-phrase>>.
`phrase_prefix`::
Runs a `match_phrase_prefix` query on each field and uses the `_score` from the
best field. See <<type-phrase>>.
NOTE:
Additional top-level `multi_match` parameters may be available based on the
<<multi-match-types,`type`>> value.
--
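As an illustration (the field names are placeholders), a `cross_fields` search that treats two name fields as one combined field might be sketched as:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string" : {
      "query" : "Jon Doe",
      "fields" : [ "first_name", "last_name" ],
      "type" : "cross_fields"
    }
  }
}
--------------------------------------------------
// CONSOLE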
[[query-string-synonyms]]
===== Synonyms and the `query_string` query
The `query_string` query supports multi-term synonym expansion with the <<analysis-synonym-graph-tokenfilter,
synonym_graph>> token filter. When this filter is used, the parser creates a phrase query for each multi-term synonym.
@ -318,8 +433,8 @@ The example above creates a boolean query:
that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.
[float]
==== Minimum should match
[[query-string-min-should-match]]
===== How `minimum_should_match` works
The `query_string` splits the query around each operator to create a boolean
query for the entire input. You can use `minimum_should_match` to control how
@ -349,8 +464,8 @@ The example above creates a boolean query:
that matches documents with at least two of the terms `this`, `that` or `thus`
in the single field `title`.
[float]
===== Multi Field
[[query-string-min-should-match-multi]]
===== How `minimum_should_match` works for multiple fields
[source,js]
--------------------------------------------------
@ -404,8 +519,11 @@ The example above creates a boolean query:
that matches documents with at least two of the three "should" clauses, each of
them made of the disjunction max over the fields for each term.
[float]
===== Cross Field
[[query-string-min-should-match-cross]]
===== How `minimum_should_match` works for cross-field searches
A `cross_fields` value in the `type` field indicates that fields with the same
analyzer are grouped together when the input is analyzed.
[source,js]
--------------------------------------------------
@ -426,13 +544,8 @@ GET /_search
--------------------------------------------------
// CONSOLE
The `cross_fields` value in the `type` field indicates that fields that have the
same analyzer should be grouped together when the input is analyzed.
The example above creates a boolean query:
`(blended(terms:[field2:this, field1:this]) blended(terms:[field2:that, field1:that]) blended(terms:[field2:thus, field1:thus]))~2`
that matches documents with at least two of the three per-term blended queries.
include::query-string-syntax.asciidoc[]

View File

@ -1,6 +1,6 @@
[[query-string-syntax]]
==== Query string syntax
===== Query string syntax
The query string ``mini-language'' is used by the
<<query-dsl-query-string-query>> and by the
@ -14,10 +14,9 @@ phrase, in the same order.
Operators allow you to customize the search -- the available options are
explained below.
===== Field names
====== Field names
As mentioned in <<query-dsl-query-string-query>>, the `default_field` is searched for the
search terms, but it is possible to specify other fields in the query syntax:
You can specify fields to search in the query syntax:
* where the `status` field contains `active`
@ -40,7 +39,7 @@ search terms, but it is possible to specify other fields in the query syntax:
_exists_:title
===== Wildcards
====== Wildcards
Wildcard searches can be run on individual terms, using `?` to replace
a single character, and `*` to replace zero or more characters:
@ -88,7 +87,7 @@ analyzed and a boolean query will be built out of the different tokens, by
ensuring exact matches on the first N-1 tokens, and prefix match on the last
token.
===== Regular expressions
====== Regular expressions
Regular expression patterns can be embedded in the query string by
wrapping them in forward-slashes (`"/"`):
@ -108,7 +107,7 @@ Elasticsearch to visit every term in the index:
Use with caution!
=======
===== Fuzziness
====== Fuzziness
We can search for terms that are
similar to, but not exactly like our search terms, using the ``fuzzy''
@ -128,7 +127,7 @@ sufficient to catch 80% of all human misspellings. It can be specified as:
quikc~1
===== Proximity searches
====== Proximity searches
While a phrase query (eg `"john smith"`) expects all of the terms in exactly
the same order, a proximity query allows the specified words to be further
@ -143,7 +142,7 @@ query string, the more relevant that document is considered to be. When
compared to the above example query, the phrase `"quick fox"` would be
considered more relevant than `"quick brown fox"`.
===== Ranges
====== Ranges
Ranges can be specified for date, numeric or string fields. Inclusive ranges
are specified with square brackets `[min TO max]` and exclusive ranges with
@ -197,7 +196,7 @@ The parsing of ranges in query strings can be complex and error prone. It is
much more reliable to use an explicit <<query-dsl-range-query,`range` query>>.
===== Boosting
====== Boosting
Use the _boost_ operator `^` to make one term more relevant than another.
For instance, if we want to find all documents about foxes, but we are
@ -212,7 +211,7 @@ Boosts can also be applied to phrases or to groups:
"john smith"^2 (foo bar)^4
===== Boolean operators
====== Boolean operators
By default, all terms are optional, as long as one term matches. A search
for `foo bar baz` will find any document that contains one or more of
@ -255,7 +254,7 @@ would look like this:
}
===== Grouping
====== Grouping
Multiple terms or clauses can be grouped together with parentheses, to form
sub-queries:
@ -267,7 +266,7 @@ of a sub-query:
status:(active OR pending) title:(full text search)^2
===== Reserved characters
====== Reserved characters
If you need to use any of the characters which function as operators in your
query itself (and not as operators), then you should escape them with
@ -283,7 +282,9 @@ NOTE: `<` and `>` can't be escaped at all. The only way to prevent them from
attempting to create a range query is to remove them from the query string
entirely.
===== Empty Query
====== Whitespaces and empty queries
Whitespace is not considered an operator.
If the query string is empty or contains only whitespace, the query yields an
empty result set.

View File

@ -4,10 +4,21 @@
<titleabbrev>Simple query string</titleabbrev>
++++
A query that uses the SimpleQueryParser to parse its context. Unlike the
regular `query_string` query, the `simple_query_string` query will never
throw an exception, and discards invalid parts of the query. Here is
an example:
Returns documents based on a provided query string, using a parser with a
limited but fault-tolerant syntax.
This query uses a <<simple-query-string-syntax,simple syntax>> to parse and
split the provided query string into terms based on special operators. The query
then <<analysis,analyzes>> each term independently before returning matching
documents.
While its syntax is more limited than the
<<query-dsl-query-string-query,`query_string` query>>, the `simple_query_string`
query does not return errors for invalid syntax. Instead, it ignores any invalid
parts of the query string.
[[simple-query-string-query-ex-request]]
==== Example request
[source,js]
--------------------------------------------------
@ -24,72 +35,108 @@ GET /_search
--------------------------------------------------
// CONSOLE
The `simple_query_string` top level parameters include:
[cols="<,<",options="header",]
|=======================================================================
|Parameter |Description
|`query` |The actual query to be parsed. See below for syntax.
[[simple-query-string-top-level-params]]
==== Top-level parameters for `simple_query_string`
|`fields` |The fields to perform the parsed query against. Defaults to the
`index.query.default_field` index settings, which in turn defaults to `*`. `*`
extracts all fields in the mapping that are eligible to term queries and filters
the metadata fields.
`query`::
(Required, string) Query string you wish to parse and use for search. See <<simple-query-string-syntax>>.
WARNING: There is a limit on the number of fields that can be queried
at once. It is defined by the `indices.query.bool.max_clause_count` <<search-settings>>
which defaults to 1024.
`fields`::
+
--
(Optional, array of strings) Array of fields you wish to search.
|`default_operator` |The default operator used if no explicit operator
is specified. For example, with a default operator of `OR`, the query
`capital of Hungary` is translated to `capital OR of OR Hungary`, and
with default operator of `AND`, the same query is translated to
`capital AND of AND Hungary`. The default value is `OR`.
This field accepts wildcard expressions. You also can boost relevance scores for
matches to particular fields using a caret (`^`) notation. See
<<simple-query-string-boost>> for examples.
|`analyzer` |Force the analyzer to use to analyze each term of the query when
creating composite queries.
Defaults to the `index.query.default_field` index setting, which has a default
value of `*`. The `*` value extracts all fields that are eligible for term
queries and filters out the metadata fields. All extracted fields are then
combined to build a query if no `prefix` is specified.
|`flags` |A set of <<supported-flags,flags>> specifying which features of the
`simple_query_string` to enable. Defaults to `ALL`.
WARNING: There is a limit on the number of fields that can be queried at once.
It is defined by the `indices.query.bool.max_clause_count`
<<search-settings,search setting>>, which defaults to `1024`.
--
|`analyze_wildcard` | Whether terms of prefix queries should be automatically
analyzed or not. If `true` a best effort will be made to analyze the prefix. However,
some analyzers will be not able to provide a meaningful results
based just on the prefix of a term. Defaults to `false`.
`default_operator`::
+
--
(Optional, string) Default boolean logic used to interpret text in the query
string if no operators are specified. Valid values are:
|`lenient` | If set to `true` will cause format based failures
(like providing text to a numeric field) to be ignored.
`OR` (Default)::
For example, a query string of `capital of Hungary` is interpreted as `capital
OR of OR Hungary`.
|`minimum_should_match` | The minimum number of clauses that must match for a
document to be returned. See the
<<query-dsl-minimum-should-match,`minimum_should_match`>> documentation for the
full list of options.
`AND`::
For example, a query string of `capital of Hungary` is interpreted as `capital
AND of AND Hungary`.
--
|`quote_field_suffix` | A suffix to append to fields for quoted parts of
the query string. This allows to use a field that has a different analysis chain
for exact matching. Look <<mixing-exact-search-with-stemming,here>> for a
comprehensive example.
`all_fields`::
deprecated:[6.0.0, set `fields` to `*` instead](Optional, boolean) If `true`,
search all searchable fields in the index's field mapping.
|`auto_generate_synonyms_phrase_query` |Whether phrase queries should be automatically generated for multi terms synonyms.
Defaults to `true`.
`analyze_wildcard`::
(Optional, boolean) If `true`, the query attempts to analyze wildcard terms in
the query string. Defaults to `false`.
|`all_fields` | deprecated[6.0.0, set `fields` to `*` instead]
Perform the query on all fields detected in the mapping that can
be queried.
`analyzer`::
(Optional, string) <<analysis,Analyzer>> used to convert text in the
query string into tokens. Defaults to the
<<specify-index-time-analyzer,index-time analyzer>> mapped for the
`default_field`. If no analyzer is mapped, the index's default analyzer is used.
|`fuzzy_prefix_length` |Set the prefix length for fuzzy queries. Default
is `0`.
`auto_generate_synonyms_phrase_query`::
(Optional, boolean) If `true`, <<query-dsl-match-query-phrase,match phrase>>
queries are automatically created for multi-term synonyms. Defaults to `true`.
See <<simple-query-string-synonyms>> for an example.
|`fuzzy_max_expansions` |Controls the number of terms fuzzy queries will
expand to. Defaults to `50`
`flags`::
(Optional, string) List of enabled operators for the
<<simple-query-string-syntax,simple query string syntax>>. Defaults to `ALL`
(all operators). See <<supported-flags>> for valid values.
|`fuzzy_transpositions` |Set to `false` to disable fuzzy transpositions (`ab` -> `ba`).
Default is `true`.
|=======================================================================
`fuzzy_max_expansions`::
(Optional, integer) Maximum number of terms to which the query expands for fuzzy
matching. Defaults to `50`.
[float]
===== Simple Query String Syntax
The `simple_query_string` supports the following special characters:
`fuzzy_prefix_length`::
(Optional, integer) Number of beginning characters left unchanged for fuzzy
matching. Defaults to `0`.
`fuzzy_transpositions`::
(Optional, boolean) If `true`, edits for fuzzy matching include
transpositions of two adjacent characters (ab → ba). Defaults to `true`.
`lenient`::
(Optional, boolean) If `true`, format-based errors, such as providing a text
value for a <<number,numeric>> field, are ignored. Defaults to `false`.
`minimum_should_match`::
(Optional, string) Minimum number of clauses that must match for a document to
be returned. See the <<query-dsl-minimum-should-match, `minimum_should_match`
parameter>> for valid values and more information.
`quote_field_suffix`::
+
--
(Optional, string) Suffix appended to quoted text in the query string.
You can use this suffix to use a different analysis method for exact matches.
See <<mixing-exact-search-with-stemming>>.
--
[[simple-query-string-query-notes]]
==== Notes
[[simple-query-string-syntax]]
===== Simple query string syntax
The `simple_query_string` query supports the following operators:
* `+` signifies AND operation
* `|` signifies OR operation
@ -100,11 +147,11 @@ The `simple_query_string` supports the following special characters:
* `~N` after a word signifies edit distance (fuzziness)
* `~N` after a phrase signifies slop amount
In order to search for any of these special characters, they will need to
be escaped with `\`.
To use one of these characters literally, escape it with a preceding backslash
(`\`).
Be aware that this syntax may have a different behavior depending on the
`default_operator` value. For example, consider the following query:
The behavior of these operators may differ depending on the `default_operator`
value. For example:
[source,js]
--------------------------------------------------
@ -120,47 +167,20 @@ GET /_search
--------------------------------------------------
// CONSOLE
You may expect that documents containing only "foo" or "bar" will be returned,
as long as they do not contain "baz", however, due to the `default_operator`
being OR, this really means "match documents that contain "foo" or documents
that contain "bar", or documents that don't contain "baz". If this is unintended
then the query can be switched to `"foo bar +-baz"` which will not return
documents that contain "baz".
This search is intended to only return documents containing `foo` or `bar` that
also do **not** contain `baz`. However, because of a `default_operator` of `OR`,
this search actually returns documents that contain `foo` or `bar` and any
documents that don't contain `baz`. To return documents as intended, change the
query string to `foo bar +-baz`.
[float]
==== Default Field
When not explicitly specifying the field to search on in the query
string syntax, the `index.query.default_field` will be used to derive
which fields to search on. It defaults to `*` and the query will automatically
attempt to determine the existing fields in the index's mapping that are queryable,
and perform the search on those fields.
[float]
==== Multi Field
The fields parameter can also include pattern based field names,
allowing to automatically expand to the relevant fields (dynamically
introduced fields included). For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"fields" : ["content", "name.*^5"],
"query" : "foo bar baz"
}
}
}
--------------------------------------------------
// CONSOLE
[float]
[[supported-flags]]
==== Flags
`simple_query_string` support multiple flags to specify which parsing features
should be enabled. It is specified as a `|`-delimited string with the
`flags` parameter:
===== Limit operators
You can use the `flags` parameter to limit the supported operators for the
simple query string syntax.
To explicitly enable only specific operators, use a `|` separator. For example,
a `flags` value of `OR|AND|PREFIX` disables all operators except `OR`, `AND`,
and `PREFIX`.
[source,js]
--------------------------------------------------
@ -176,28 +196,92 @@ GET /_search
--------------------------------------------------
// CONSOLE
[[supported-flags-values]]
====== Valid values
The available flags are:
[cols="<,<",options="header",]
|=======================================================================
|Flag |Description
|`ALL` |Enables all parsing features. This is the default.
|`NONE` |Switches off all parsing features.
|`AND` |Enables the `+` AND operator.
|`OR` |Enables the `\|` OR operator.
|`NOT` |Enables the `-` NOT operator.
|`PREFIX` |Enables the `*` Prefix operator.
|`PHRASE` |Enables the `"` quotes operator used to search for phrases.
|`PRECEDENCE` |Enables the `(` and `)` operators to control operator precedence.
|`ESCAPE` |Enables `\` as the escape character.
|`WHITESPACE` |Enables whitespaces as split characters.
|`FUZZY` |Enables the `~N` operator after a word where N is an integer denoting the allowed edit distance for matching (see <<fuzziness>>).
|`SLOP` |Enables the `~N` operator after a phrase where N is an integer denoting the slop amount.
|`NEAR` |Synonymous to `SLOP`.
|=======================================================================
`ALL` (Default)::
Enables all optional operators.
[float]
==== Synonyms
`AND`::
Enables the `+` AND operator.
`ESCAPE`::
Enables `\` as an escape character.
`FUZZY`::
Enables the `~N` operator after a word, where `N` is an integer denoting the
allowed edit distance for matching. See <<fuzziness>>.
`NEAR`::
Enables the `~N` operator after a phrase, where `N` is the maximum number of
positions allowed between matching tokens. Synonymous with `SLOP`.
`NONE`::
Disables all operators.
`NOT`::
Enables the `-` NOT operator.
`OR`::
Enables the `\|` OR operator.
`PHRASE`::
Enables the `"` quotes operator used to search for phrases.
`PRECEDENCE`::
Enables the `(` and `)` operators to control operator precedence.
`PREFIX`::
Enables the `*` prefix operator.
`SLOP`::
Enables the `~N` operator after a phrase, where `N` is the maximum number of
positions allowed between matching tokens. Synonymous with `NEAR`.
`WHITESPACE`::
Enables whitespace as split characters.
[[simple-query-string-boost]]
===== Wildcards and per-field boosts in the `fields` parameter
Fields can be specified with wildcards, for example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"query": "Will Smith",
"fields": [ "title", "*_name" ] <1>
}
}
}
--------------------------------------------------
// CONSOLE
<1> Query the `title`, `first_name` and `last_name` fields.
Individual fields can be boosted with the caret (`^`) notation:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"query" : "this is a test",
"fields" : [ "subject^3", "message" ] <1>
}
}
}
--------------------------------------------------
// CONSOLE
<1> The `subject` field is three times as important as the `message` field.
[[simple-query-string-synonyms]]
===== Synonyms
The `simple_query_string` query supports multi-term synonym expansion with the <<analysis-synonym-graph-tokenfilter,
synonym_graph>> token filter. When this filter is used, the parser creates a phrase query for each multi-term synonym.
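A sketch of such a search, assuming the `body` field's search analyzer includes a `synonym_graph` filter that maps `ny` to `new york` (the field name and synonym rule are assumptions):

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "simple_query_string" : {
      "query" : "ny city",
      "fields" : [ "body" ],
      "auto_generate_synonyms_phrase_query" : true
    }
  }
}
--------------------------------------------------
// CONSOLE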

View File

@ -101,7 +101,7 @@ to be `none`.
[source,yaml]
----
script.allowed_contexts: search, update <1>
script.allowed_contexts: score, update <1>
----
<1> This will allow only search and update scripts to be executed but not
<1> This will allow only scoring and update scripts to be executed but not
aggs or plugin scripts (or any other contexts).

View File

@ -50,6 +50,13 @@ During the second phase, the coordinating node requests the document
content (and highlighted snippets, if any) from *only the relevant
shards*.
[source,js]
--------------------------------------------------
GET twitter/_search?search_type=query_then_fetch
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
NOTE: This is the default setting, if you do not specify a `search_type`
in your request.
@ -62,4 +69,9 @@ Same as "Query Then Fetch", except for an initial scatter phase which
goes and computes the distributed term frequencies for more accurate
scoring.
[source,js]
--------------------------------------------------
GET twitter/_search?search_type=dfs_query_then_fetch
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

View File

@ -273,12 +273,12 @@ logging.
The following is a list of additional parameters that can be configured for a
particular connection, in case the default behavior of the driver is not
suitable. This can be done within the client application, in a manner
particular to that application, generally in a free text input box (sometimes
named "Connection string", "String extras", or similar). The format of the
string is `Attribute1=Value1`. Multiple attributes can be specified, separated
by a semicolon `Attribute1=Value1;Attribute2=Value2;`. The attribute names are
given below.
suitable. For earlier versions of the driver, this needs to be done within the
client application, in a manner particular to that application, generally in a
free text input box (sometimes named "Connection string", "String extras", or
similar). The format of the string is `Attribute1=Value1`. Multiple attributes
can be specified, separated by a semicolon
`Attribute1=Value1;Attribute2=Value2;`. The attribute names are given below.
`Timeout` (default: `0`)::
The maximum time (in seconds) a request to the server can take. This can be
@ -354,3 +354,18 @@ it as the value for the column. If not set, the server will return an error.
This corresponds to {es-sql}'s request parameter `field_multi_value_leniency`
(see <<sql-rest-fields>>).
`AutoEscapePVA` (default: `true`)::
The pattern-value arguments make use of `_` and `%` as special characters to
build pattern-matching values. Some applications, however, use these characters
as regular ones, which can lead to {es-sql} returning more data than the
application intended. With auto escaping enabled, the driver inspects the
arguments and escapes these special characters if the application has not
already done so.
`IndexIncludeFrozen` (default: `false`)::
If this parameter is `true`, the server will include the frozen indices in the
query execution.
This corresponds to {es-sql}'s request parameter `index_include_frozen`
(see <<sql-rest-fields>>).
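For illustration, several of the attributes described above can be combined into a single connection string (the values shown are examples only):

[source,text]
--------------------------------------------------
Timeout=60;AutoEscapePVA=false;IndexIncludeFrozen=true;
--------------------------------------------------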

View File

@ -8,14 +8,15 @@ The {odbc} can be installed on Microsoft Windows using an MSI package. The insta
[[prerequisites]]
==== Installation Prerequisites
The recommended installation platform is Windows 10 64 bit _or_ Windows Server 2016 64 bit.
Before you install the {odbc}, you need to meet the following prerequisites:
* Windows 10 64 bit _or_ Windows Server 2016 64 bit operating system
* .NET Framework 4.0 full - https://www.microsoft.com/en-au/download/details.aspx?id=17718
* Microsoft Visual C++ Redistributable for Visual Studio 2017 - https://support.microsoft.com/en-au/help/2977003/the-latest-supported-visual-c-downloads
- The 64 bit driver requires the x64 redistributable (this also installs the components needed for the 32 bit driver)
- The 32 bit driver requires the x86 redistributable
* Elevated privileges (administrator) for the User performing the installation
- The 64 bit driver requires the x64 redistributable
- The 32 bit driver requires the x86 or the x64 redistributable (the latter also installs the components needed for the 32 bit driver)
* Elevated privileges (administrator) for the User performing the installation.
If you fail to meet any of the prerequisites, the installer will show an error message and abort the installation.

View File

@ -29,6 +29,9 @@ PUT my_index
},
"my_sparse_vector" : {
"type" : "sparse_vector"
},
"status" : {
"type" : "keyword"
}
}
}
@ -37,13 +40,15 @@ PUT my_index
PUT my_index/_doc/1
{
"my_dense_vector": [0.5, 10, 6],
"my_sparse_vector": {"2": 1.5, "15" : 2, "50": -1.1, "4545": 1.1}
"my_sparse_vector": {"2": 1.5, "15" : 2, "50": -1.1, "4545": 1.1},
"status" : "published"
}
PUT my_index/_doc/2
{
"my_dense_vector": [-0.5, 10, 10],
"my_sparse_vector": {"2": 2.5, "10" : 1.3, "55": -2.3, "113": 1.6}
"my_sparse_vector": {"2": 2.5, "10" : 1.3, "55": -2.3, "113": 1.6},
"status" : "published"
}
--------------------------------------------------
@ -59,13 +64,19 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published" <1>
}
}
}
},
"script": {
"source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0", <1>
"source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0", <2>
"params": {
"query_vector": [4, 3.4, -0.2] <2>
"query_vector": [4, 3.4, -0.2] <3>
}
}
}
@ -73,8 +84,9 @@ GET my_index/_search
}
--------------------------------------------------
// CONSOLE
<1> The script adds 1.0 to the cosine similarity to prevent the score from being negative.
<2> To take advantage of the script optimizations, provide a query vector as a script parameter.
<1> To restrict the number of documents on which script score calculation is applied, provide a filter.
<2> The script adds 1.0 to the cosine similarity to prevent the score from being negative.
<3> To take advantage of the script optimizations, provide a query vector as a script parameter.
NOTE: If a document's dense vector field has a number of dimensions
different from the query's vector, an error will be thrown.
@ -88,8 +100,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": "cosineSimilaritySparse(params.query_vector, doc['my_sparse_vector']) + 1.0",
@ -112,8 +130,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": """
@ -141,8 +165,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": """
@ -169,8 +199,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": "1 / (1 + l1norm(params.queryVector, doc['my_dense_vector']))", <1>
@ -202,8 +238,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": "1 / (1 + l1normSparse(params.queryVector, doc['my_sparse_vector']))",
@ -227,8 +269,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": "1 / (1 + l2norm(params.queryVector, doc['my_dense_vector']))",
@ -251,8 +299,14 @@ GET my_index/_search
{
"query": {
"script_score": {
"query": {
"match_all": {}
"query" : {
"bool" : {
"filter" : {
"term" : {
"status" : "published"
}
}
}
},
"script": {
"source": "1 / (1 + l2normSparse(params.queryVector, doc['my_sparse_vector']))",

View File

@ -149,6 +149,10 @@ public enum XContentType {
return type;
}
}
// we also support newline delimited JSON: http://specs.okfnlabs.org/ndjson/
if (lowercaseMediaType.toLowerCase(Locale.ROOT).equals("application/x-ndjson")) {
return XContentType.JSON;
}
return null;
}

View File

@ -29,7 +29,6 @@ import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
@ -117,8 +116,7 @@ public class GrokProcessorGetAction extends ActionType<GrokProcessorGetAction.Re
}
public static class RestAction extends BaseRestHandler {
RestAction(Settings settings, RestController controller) {
super(settings);
RestAction(RestController controller) {
controller.registerHandler(GET, "/_ingest/processor/grok", this);
}

View File

@ -101,7 +101,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl
IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster) {
return Arrays.asList(new GrokProcessorGetAction.RestAction(settings, restController));
return Arrays.asList(new GrokProcessorGetAction.RestAction(restController));
}
@Override

View File

@ -59,8 +59,8 @@ public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin
IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster) {
return Arrays.asList(
new RestSearchTemplateAction(settings, restController),
new RestSearchTemplateAction(restController),
new RestMultiSearchTemplateAction(settings, restController),
new RestRenderSearchTemplateAction(settings, restController));
new RestRenderSearchTemplateAction(restController));
}
}

View File

@ -58,7 +58,6 @@ public class RestMultiSearchTemplateAction extends BaseRestHandler {
private final boolean allowExplicitIndex;
public RestMultiSearchTemplateAction(Settings settings, RestController controller) {
super(settings);
this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings);
controller.registerHandler(GET, "/_msearch/template", this);

View File

@ -20,7 +20,6 @@
package org.elasticsearch.script.mustache;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
@ -34,8 +33,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;
public class RestRenderSearchTemplateAction extends BaseRestHandler {
public RestRenderSearchTemplateAction(Settings settings, RestController controller) {
super(settings);
public RestRenderSearchTemplateAction(RestController controller) {
controller.registerHandler(GET, "/_render/template", this);
controller.registerHandler(POST, "/_render/template", this);
controller.registerHandler(GET, "/_render/template/{id}", this);

View File

@ -21,7 +21,6 @@ package org.elasticsearch.script.mustache;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
@ -47,9 +46,7 @@ public class RestSearchTemplateAction extends BaseRestHandler {
RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams);
}
public RestSearchTemplateAction(Settings settings, RestController controller) {
super(settings);
public RestSearchTemplateAction(RestController controller) {
controller.registerHandler(GET, "/_search/template", this);
controller.registerHandler(POST, "/_search/template", this);
controller.registerHandler(GET, "/{index}/_search/template", this);

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.script.mustache;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.test.rest.FakeRestRequest;
@ -32,7 +31,7 @@ public class RestSearchTemplateActionTests extends RestActionTestCase {
@Before
public void setUpAction() {
new RestSearchTemplateAction(Settings.EMPTY, controller());
new RestSearchTemplateAction(controller());
}
public void testTypeInPath() {

View File

@ -208,8 +208,8 @@ final class Compiler {
*/
Constructor<?> compile(Loader loader, MainMethodReserved reserved, String name, String source, CompilerSettings settings) {
ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass);
SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup,
null);
SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup, null);
root.storeSettings(settings);
root.analyze(painlessLookup);
Map<String, Object> statics = root.write();
@ -240,6 +240,7 @@ final class Compiler {
ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass);
SSource root = Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), name, source, settings, painlessLookup,
debugStream);
root.storeSettings(settings);
root.analyze(painlessLookup);
root.write();

View File

@ -142,8 +142,8 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster) {
List<RestHandler> handlers = new ArrayList<>();
handlers.add(new PainlessExecuteAction.RestAction(settings, restController));
handlers.add(new PainlessContextAction.RestAction(settings, restController));
handlers.add(new PainlessExecuteAction.RestAction(restController));
handlers.add(new PainlessContextAction.RestAction(restController));
return handlers;
}
}

View File

@ -32,7 +32,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.painless.PainlessScriptEngine;
@ -195,8 +194,7 @@ public class PainlessContextAction extends ActionType<PainlessContextAction.Resp
public static class RestAction extends BaseRestHandler {
public RestAction(Settings settings, RestController controller) {
super(settings);
public RestAction(RestController controller) {
controller.registerHandler(GET, "/_scripts/painless/_context", this);
}

View File

@ -51,7 +51,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -583,8 +582,7 @@ public class PainlessExecuteAction extends ActionType<PainlessExecuteAction.Resp
public static class RestAction extends BaseRestHandler {
public RestAction(Settings settings, RestController controller) {
super(settings);
public RestAction(RestController controller) {
controller.registerHandler(GET, "/_scripts/painless/_execute", this);
controller.registerHandler(POST, "/_scripts/painless/_execute", this);
}

View File

@ -253,7 +253,7 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
statements.add((AStatement)visit(statement));
}
return new SSource(scriptClassInfo, settings, sourceName, sourceText, debugStream,
return new SSource(scriptClassInfo, sourceName, sourceText, debugStream,
(MainMethodReserved)reserved.pop(), location(ctx), functions, statements);
}
@ -319,8 +319,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitWhile(WhileContext ctx) {
reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter());
AExpression expression = (AExpression)visit(ctx.expression());
if (ctx.trailer() != null) {
@ -336,8 +334,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitDo(DoContext ctx) {
reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter());
AExpression expression = (AExpression)visit(ctx.expression());
SBlock block = (SBlock)visit(ctx.block());
@ -346,8 +342,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitFor(ForContext ctx) {
reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter());
ANode initializer = ctx.initializer() == null ? null : visit(ctx.initializer());
AExpression expression = ctx.expression() == null ? null : (AExpression)visit(ctx.expression());
AExpression afterthought = ctx.afterthought() == null ? null : (AExpression)visit(ctx.afterthought());
@ -365,8 +359,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitEach(EachContext ctx) {
reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter());
String type = ctx.decltype().getText();
String name = ctx.ID().getText();
AExpression expression = (AExpression)visit(ctx.expression());
@ -377,8 +369,6 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override
public ANode visitIneach(IneachContext ctx) {
reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter());
String name = ctx.ID().getText();
AExpression expression = (AExpression)visit(ctx.expression());
SBlock block = (SBlock)visit(ctx.trailer());

View File

@ -19,6 +19,7 @@
package org.elasticsearch.painless.node;
import org.elasticsearch.painless.CompilerSettings;
import org.elasticsearch.painless.Globals;
import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Location;
@ -53,6 +54,11 @@ public abstract class ANode {
this.location = Objects.requireNonNull(location);
}
/**
* Store settings required for future compiler passes.
*/
abstract void storeSettings(CompilerSettings settings);
/**
* Adds all variable names referenced to the variable set.
* <p>
