Build: Rework integ test setup and shutdown to ensure stop runs when desired (#23304)

Gradle's finalizedBy on tasks only ensures one task runs after another,
but not immediately after. This is problematic for our integration tests
since it allows multiple projects' integ test clusters to be running
simultaneously. While this has not been a problem thus far (gradle 2.13
happened to keep the finalizedBy tasks close enough that no clusters
were running in parallel), with gradle 3.3 the task graph generation has
changed, and numerous clusters may be running simultaneously, causing
memory pressure, and thus generally slower tests, or even failure if the
system has a limited amount of memory (eg in a vagrant host).

This commit reworks how integ tests are configured. It adds an
`integTestCluster` extension to gradle which is equivalent to the current
`integTest.cluster` and moves the rest test runner task to
`integTestRunner`.  The `integTest` task is then just a dummy task,
which depends on the cluster runner task, as well as the cluster stop
task. This means running `integTest` in one project will both run the
rest tests, and shut down the cluster, before running `integTest` in
another project.
This commit is contained in:
Ryan Ernst 2017-02-22 12:43:15 -08:00 committed by GitHub
parent 77d641216a
commit 175bda64a0
26 changed files with 241 additions and 261 deletions

View File

@ -37,10 +37,7 @@ apply plugin: 'application'
archivesBaseName = 'elasticsearch-benchmarks'
mainClassName = 'org.openjdk.jmh.Main'
// never try to invoke tests on the benchmark project - there aren't any
check.dependsOn.remove(test)
// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
task test(type: Test, overwrite: true)
test.enabled = false
dependencies {
compile("org.elasticsearch:elasticsearch:${version}") {
@ -59,7 +56,6 @@ compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-u
// enable the JMH's BenchmarkProcessor to generate the final benchmark classes
// needs to be added separately otherwise Gradle will quote it and javac will fail
compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
forbiddenApis {
// classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes

View File

@ -51,16 +51,12 @@ class ClusterFormationTasks {
*
* Returns a list of NodeInfo objects for each node in the cluster.
*/
static List<NodeInfo> setup(Project project, Task task, ClusterConfiguration config) {
if (task.getEnabled() == false) {
// no need to add cluster formation tasks if the task won't run!
return
}
static List<NodeInfo> setup(Project project, String prefix, Task runner, ClusterConfiguration config) {
File sharedDir = new File(project.buildDir, "cluster/shared")
// first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
// in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
// such that snapshots survive failures / test runs and there is no simple way today to fix that.
Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) {
delete sharedDir
doLast {
sharedDir.mkdirs()
@ -75,7 +71,7 @@ class ClusterFormationTasks {
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
String distroConfigName = "${task.name}_elasticsearchDistro"
String distroConfigName = "${prefix}_elasticsearchDistro"
Configuration currentDistro = project.configurations.create(distroConfigName)
configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
if (config.bwcVersion != null && config.numBwcNodes > 0) {
@ -89,7 +85,7 @@ class ClusterFormationTasks {
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(),
configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(),
project.configurations.elasticsearchBwcPlugins, config.bwcVersion)
}
project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
@ -104,13 +100,13 @@ class ClusterFormationTasks {
elasticsearchVersion = config.bwcVersion
distro = project.configurations.elasticsearchBwcDistro
}
NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
nodes.add(node)
startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0)))
startTasks.add(configureNode(project, prefix, runner, cleanup, node, distro, nodes.get(0)))
}
Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
task.dependsOn(wait)
Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks)
runner.dependsOn(wait)
return nodes
}
@ -150,58 +146,58 @@ class ClusterFormationTasks {
*
* @return a task which starts the node.
*/
static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
// tasks are chained so their execution order is maintained
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
delete node.homeDir
delete node.cwd
doLast {
node.cwd.mkdirs()
}
}
setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
if (node.config.plugins.isEmpty() == false) {
if (node.nodeVersion == VersionProperties.elasticsearch) {
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node)
} else {
setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node)
setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node)
}
}
// install modules
for (Project module : node.config.modules) {
String actionName = pluginTaskName('install', module.name, 'Module')
setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module)
}
// install plugins
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue())
}
// sets up any extra config files that need to be copied over to the ES instance;
// its run after plugins have been installed, as the extra config files may belong to plugins
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node)
// extra setup commands
for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
// the first argument is the actual script name, relative to home
Object[] args = command.getValue().clone()
args[0] = new File(node.homeDir, args[0].toString())
setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args)
setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args)
}
Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node)
if (node.config.daemonize) {
Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node)
// if we are running in the background, make sure to stop the server when the task completes
task.finalizedBy(stop)
runner.finalizedBy(stop)
start.finalizedBy(stop)
}
return start
@ -648,11 +644,11 @@ class ClusterFormationTasks {
}
/** Returns a unique task name for this task and node configuration */
static String taskName(Task parentTask, NodeInfo node, String action) {
static String taskName(String prefix, NodeInfo node, String action) {
if (node.config.numNodes > 1) {
return "${parentTask.name}#node${node.nodeNum}.${action}"
return "${prefix}#node${node.nodeNum}.${action}"
} else {
return "${parentTask.name}#${action}"
return "${prefix}#${action}"
}
}

View File

@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test
import org.apache.tools.ant.taskdefs.condition.Os
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
/**
* A container for the files and configuration associated with a single node in a test cluster.
@ -96,17 +95,17 @@ class NodeInfo {
/** the version of elasticsearch that this node runs */
String nodeVersion
/** Creates a node to run as part of a cluster for the given task */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
/** Holds node configuration for part of a test cluster. */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
this.config = config
this.nodeNum = nodeNum
this.sharedDir = sharedDir
if (config.clusterName != null) {
clusterName = config.clusterName
} else {
clusterName = "${task.path.replace(':', '_').substring(1)}"
clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
}
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
this.nodeVersion = nodeVersion
homeDir = homeDir(baseDir, config.distribution, nodeVersion)

View File

@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.DefaultTask
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin
@ -27,12 +28,15 @@ import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil
/**
* Runs integration tests, but first starts an ES cluster,
* and passes the ES cluster info as parameters to the tests.
* A wrapper task around setting up a cluster and running rest tests.
*/
public class RestIntegTestTask extends RandomizedTestingTask {
public class RestIntegTestTask extends DefaultTask {
ClusterConfiguration clusterConfig
protected ClusterConfiguration clusterConfig
protected RandomizedTestingTask runner
protected Task clusterInit
/** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
List<NodeInfo> nodes
@ -44,35 +48,44 @@ public class RestIntegTestTask extends RandomizedTestingTask {
public RestIntegTestTask() {
description = 'Runs rest tests against an elasticsearch cluster.'
group = JavaBasePlugin.VERIFICATION_GROUP
dependsOn(project.testClasses)
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = new ClusterConfiguration(project)
runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class)
super.dependsOn(runner)
clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
runner.dependsOn(clusterInit)
runner.classpath = project.sourceSets.test.runtimeClasspath
runner.testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project)
// start with the common test configuration
configure(BuildPlugin.commonTestConfig(project))
runner.configure(BuildPlugin.commonTestConfig(project))
// override/add more for rest tests
parallelism = '1'
include('**/*IT.class')
systemProperty('tests.rest.load_packaged', 'false')
runner.parallelism = '1'
runner.include('**/*IT.class')
runner.systemProperty('tests.rest.load_packaged', 'false')
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
// copy the rest spec/tests into the test resources
RestSpecHack.configureDependencies(project)
project.afterEvaluate {
dependsOn(RestSpecHack.configureTask(project, includePackaged))
runner.dependsOn(RestSpecHack.configureTask(project, includePackaged))
}
// this must run after all projects have been configured, so we know any project
// references can be accessed as a fully configured
project.gradle.projectsEvaluated {
nodes = ClusterFormationTasks.setup(project, this, clusterConfig)
if (enabled == false) {
runner.enabled = false
clusterInit.enabled = false
return // no need to add cluster formation tasks if the task won't run!
}
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
super.dependsOn(runner.finalizedBy)
}
}
@ -84,25 +97,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
clusterConfig.debug = enabled;
}
@Input
public void cluster(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig)
}
public ClusterConfiguration getCluster() {
return clusterConfig
}
public List<NodeInfo> getNodes() {
return nodes
}
@Override
public Task dependsOn(Object... dependencies) {
super.dependsOn(dependencies)
runner.dependsOn(dependencies)
for (Object dependency : dependencies) {
if (dependency instanceof Fixture) {
finalizedBy(((Fixture)dependency).stopTask)
runner.finalizedBy(((Fixture)dependency).stopTask)
}
}
return this
@ -110,11 +114,16 @@ public class RestIntegTestTask extends RandomizedTestingTask {
@Override
public void setDependsOn(Iterable<?> dependencies) {
super.setDependsOn(dependencies)
runner.setDependsOn(dependencies)
for (Object dependency : dependencies) {
if (dependency instanceof Fixture) {
finalizedBy(((Fixture)dependency).stopTask)
runner.finalizedBy(((Fixture)dependency).stopTask)
}
}
}
@Override
public Task mustRunAfter(Object... tasks) {
clusterInit.mustRunAfter(tasks)
}
}

View File

@ -43,7 +43,7 @@ public class RestTestPlugin implements Plugin<Project> {
}
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
integTest.clusterConfig.distribution = 'zip' // rest tests should run with the real zip
integTest.mustRunAfter(project.precommit)
project.check.dependsOn(integTest)
}

View File

@ -18,7 +18,7 @@ public class RunTask extends DefaultTask {
clusterConfig.daemonize = false
clusterConfig.distribution = 'zip'
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
ClusterFormationTasks.setup(project, name, this, clusterConfig)
}
}

View File

@ -81,20 +81,16 @@ project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each
}
}
// We would like to make sure integ tests for the distribution run after
// integ tests for the modules included in the distribution. However, gradle
// has a bug where depending on a task with a finalizer can sometimes not make
// the finalizer task follow the original task immediately. To work around this,
// we make the mustRunAfter the finalizer task itself.
// See https://discuss.gradle.org/t/cross-project-task-dependencies-ordering-screws-up-finalizers/13190
// integ tests for the modules included in the distribution.
project.configure(distributions.findAll { it.name != 'integ-test-zip' }) { Project distribution ->
distribution.afterEvaluate({
// some integTest tasks will have multiple finalizers
distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") }.getFinalizedBy()
distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") }
})
}
// also want to make sure the module's integration tests run after the integ-test-zip (ie rest tests)
module.afterEvaluate({
module.integTest.mustRunAfter(':distribution:integ-test-zip:integTest#stop')
module.integTest.mustRunAfter(':distribution:integ-test-zip:integTest')
})
restTestExpansions['expected.modules.count'] += 1
}
@ -129,14 +125,13 @@ configure(distributions) {
project.integTest {
dependsOn project.assemble
includePackaged project.name == 'integ-test-zip'
cluster {
distribution = project.name
}
if (project.name != 'integ-test-zip') {
// see note above with module mustRunAfter about why integTest#stop is used here
mustRunAfter ':distribution:integ-test-zip:integTest#stop'
mustRunAfter ':distribution:integ-test-zip:integTest'
}
}
project.integTestCluster {
distribution = project.name
}
processTestResources {
inputs.properties(project(':distribution').restTestExpansions)

View File

@ -129,27 +129,25 @@ buildRestTests.expectedUnconvertedCandidates = [
'reference/search/request/inner-hits.asciidoc',
]
integTest {
cluster {
setting 'script.inline', 'true'
setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000'
/* Enable regexes in painless so our tests don't complain about example
* snippets that use them. */
setting 'script.painless.regex.enabled', 'true'
Closure configFile = {
extraConfigFile it, "src/test/cluster/config/$it"
}
configFile 'scripts/my_script.painless'
configFile 'scripts/my_init_script.painless'
configFile 'scripts/my_map_script.painless'
configFile 'scripts/my_combine_script.painless'
configFile 'scripts/my_reduce_script.painless'
configFile 'userdict_ja.txt'
configFile 'KeywordTokenizer.rbbi'
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
integTestCluster {
setting 'script.inline', 'true'
setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000'
/* Enable regexes in painless so our tests don't complain about example
* snippets that use them. */
setting 'script.painless.regex.enabled', 'true'
Closure configFile = {
extraConfigFile it, "src/test/cluster/config/$it"
}
configFile 'scripts/my_script.painless'
configFile 'scripts/my_init_script.painless'
configFile 'scripts/my_map_script.painless'
configFile 'scripts/my_combine_script.painless'
configFile 'scripts/my_reduce_script.painless'
configFile 'userdict_ja.txt'
configFile 'KeywordTokenizer.rbbi'
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}
// Build the cluster with all plugins
@ -161,10 +159,8 @@ project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each {
return
}
subproj.afterEvaluate { // need to wait until the project has been configured
integTest {
cluster {
plugin subproj.path
}
integTestCluster {
plugin subproj.path
}
}
}

View File

@ -35,8 +35,6 @@ dependencyLicenses {
mapping from: /asm-.*/, to: 'asm'
}
integTest {
cluster {
setting 'script.max_compilations_per_minute', '1000'
}
integTestCluster {
setting 'script.max_compilations_per_minute', '1000'
}

View File

@ -27,11 +27,9 @@ dependencies {
compile "com.github.spullara.mustache.java:compiler:0.9.3"
}
integTest {
cluster {
setting 'script.inline', 'true'
setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000'
setting 'path.scripts', "${project.buildDir}/resources/test/templates"
}
integTestCluster {
setting 'script.inline', 'true'
setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000'
setting 'path.scripts', "${project.buildDir}/resources/test/templates"
}

View File

@ -47,10 +47,8 @@ dependencies {
ant.references['regenerate.classpath'] = new Path(ant.project, configurations.regenerate.asPath)
ant.importBuild 'ant.xml'
integTest {
cluster {
setting 'script.max_compilations_per_minute', '1000'
}
integTestCluster {
setting 'script.max_compilations_per_minute', '1000'
}
/* Build Javadoc for the Java classes in Painless's public API that are in the

View File

@ -25,11 +25,9 @@ esplugin {
hasClientJar = true
}
integTest {
cluster {
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}
integTestCluster {
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}
run {

View File

@ -22,8 +22,6 @@ esplugin {
classname 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin'
}
integTest {
cluster {
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
}
integTestCluster {
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}

View File

@ -39,7 +39,7 @@ task setupSeedNodeAndUnicastHostsFile(type: DefaultTask) {
// for unicast discovery
ClusterConfiguration config = new ClusterConfiguration(project)
config.clusterName = 'discovery-file-test-cluster'
List<NodeInfo> nodes = ClusterFormationTasks.setup(project, setupSeedNodeAndUnicastHostsFile, config)
List<NodeInfo> nodes = ClusterFormationTasks.setup(project, 'initialCluster', setupSeedNodeAndUnicastHostsFile, config)
File srcUnicastHostsFile = file('build/cluster/unicast_hosts.txt')
// write the unicast_hosts.txt file to a temporary location to be used by the second cluster
@ -49,11 +49,13 @@ setupSeedNodeAndUnicastHostsFile.doLast {
}
// second cluster, which will connect to the first via the unicast_hosts.txt file
integTestCluster {
clusterName = 'discovery-file-test-cluster'
extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile
}
integTestRunner.finalizedBy ':plugins:discovery-file:initialCluster#stop'
integTest {
dependsOn(setupSeedNodeAndUnicastHostsFile)
cluster {
clusterName = 'discovery-file-test-cluster'
extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile
}
finalizedBy ':plugins:discovery-file:setupSeedNodeAndUnicastHostsFile#stop'
}

View File

@ -22,8 +22,6 @@ esplugin {
classname 'org.elasticsearch.ingest.useragent.IngestUserAgentPlugin'
}
integTest {
cluster {
extraConfigFile 'ingest-user-agent/test-regexes.yaml', 'test/test-regexes.yaml'
}
}
integTestCluster {
extraConfigFile 'ingest-user-agent/test-regexes.yaml', 'test/test-regexes.yaml'
}

View File

@ -43,6 +43,8 @@ task exampleFixture(type: org.elasticsearch.gradle.test.Fixture) {
integTest {
dependsOn exampleFixture
}
integTestRunner {
systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }"
}

View File

@ -41,10 +41,8 @@ thirdPartyAudit.excludes = [
'org.slf4j.LoggerFactory',
]
integTest {
cluster {
setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource'
setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh'
setting 'script.stored', 'true'
}
}
integTestCluster {
setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource'
setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh'
setting 'script.stored', 'true'
}

View File

@ -35,12 +35,13 @@ apply plugin: 'elasticsearch.rest-test'
*/
integTest {
includePackaged = true
cluster {
numNodes = 4
numBwcNodes = 2
bwcVersion = "5.4.0-SNAPSHOT"
setting 'logger.org.elasticsearch', 'DEBUG'
}
}
integTestCluster {
numNodes = 4
numBwcNodes = 2
bwcVersion = "5.4.0-SNAPSHOT"
setting 'logger.org.elasticsearch', 'DEBUG'
}
repositories {

View File

@ -23,25 +23,33 @@ apply plugin: 'elasticsearch.standalone-test'
task remoteClusterTest(type: RestIntegTestTask) {
mustRunAfter(precommit)
cluster {
distribution = 'zip'
numNodes = 2
clusterName = 'remote-cluster'
setting 'search.remote.connect', false
}
}
remoteClusterTestCluster {
distribution = 'zip'
numNodes = 2
clusterName = 'remote-cluster'
setting 'search.remote.connect', false
}
remoteClusterTestRunner {
systemProperty 'tests.rest.suite', 'remote_cluster'
}
task mixedClusterTest(type: RestIntegTestTask) {
dependsOn(remoteClusterTest)
cluster {
distribution = 'zip'
setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
setting 'search.remote.connections_per_cluster', 1
setting 'search.remote.connect', true
}
dependsOn(remoteClusterTestRunner)
}
mixedClusterTestCluster {
distribution = 'zip'
setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
setting 'search.remote.connections_per_cluster', 1
setting 'search.remote.connect', true
}
mixedClusterTestRunner {
systemProperty 'tests.rest.suite', 'multi_cluster'
finalizedBy 'remoteClusterTest#node0.stop','remoteClusterTest#node1.stop'
finalizedBy 'remoteClusterTestCluster#node0.stop','remoteClusterTestCluster#node1.stop'
}
task integTest {

View File

@ -23,43 +23,55 @@ apply plugin: 'elasticsearch.standalone-test'
task oldClusterTest(type: RestIntegTestTask) {
mustRunAfter(precommit)
cluster {
distribution = 'zip'
bwcVersion = '5.4.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop
numBwcNodes = 2
numNodes = 2
clusterName = 'rolling-upgrade'
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
setting 'http.content_type.required', 'true'
}
}
oldClusterTestCluster {
distribution = 'zip'
bwcVersion = '5.4.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop
numBwcNodes = 2
numNodes = 2
clusterName = 'rolling-upgrade'
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
setting 'http.content_type.required', 'true'
}
oldClusterTestRunner {
systemProperty 'tests.rest.suite', 'old_cluster'
}
task mixedClusterTest(type: RestIntegTestTask) {
dependsOn(oldClusterTest, 'oldClusterTest#node1.stop')
cluster {
distribution = 'zip'
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
dataDir = "${-> oldClusterTest.nodes[1].dataDir}"
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
dependsOn(oldClusterTestRunner, 'oldClusterTestCluster#node1.stop')
}
mixedClusterTestCluster {
distribution = 'zip'
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
dataDir = "${-> oldClusterTest.nodes[1].dataDir}"
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
mixedClusterTestRunner {
systemProperty 'tests.rest.suite', 'mixed_cluster'
finalizedBy 'oldClusterTest#node0.stop'
finalizedBy 'oldClusterTestCluster#node0.stop'
}
task upgradedClusterTest(type: RestIntegTestTask) {
dependsOn(mixedClusterTest, 'oldClusterTest#node0.stop')
cluster {
distribution = 'zip'
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
dataDir = "${-> oldClusterTest.nodes[0].dataDir}"
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
dependsOn(mixedClusterTestRunner, 'oldClusterTestCluster#node0.stop')
}
upgradedClusterTestCluster {
distribution = 'zip'
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
dataDir = "${-> oldClusterTest.nodes[0].dataDir}"
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
upgradedClusterTestRunner {
systemProperty 'tests.rest.suite', 'upgraded_cluster'
// only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
finalizedBy 'mixedClusterTest#stop'
finalizedBy 'mixedClusterTestCluster#stop'
}
task integTest {

View File

@ -24,8 +24,6 @@ dependencies {
testCompile project(path: ':modules:ingest-common', configuration: 'runtime')
}
integTest {
cluster {
setting 'node.ingest', 'false'
}
integTestCluster {
setting 'node.ingest', 'false'
}

View File

@ -28,10 +28,8 @@ dependencies {
testCompile project(path: ':modules:reindex', configuration: 'runtime')
}
integTest {
cluster {
plugin ':plugins:ingest-geoip'
setting 'script.inline', 'true'
setting 'path.scripts', "${project.buildDir}/resources/test/scripts"
}
integTestCluster {
plugin ':plugins:ingest-geoip'
setting 'script.inline', 'true'
setting 'path.scripts', "${project.buildDir}/resources/test/scripts"
}

View File

@ -22,7 +22,8 @@ apply plugin: 'elasticsearch.rest-test'
integTest {
includePackaged = true
cluster {
numNodes = 2
}
}
integTestCluster {
numNodes = 2
}

View File

@ -24,10 +24,8 @@ apply plugin: 'elasticsearch.rest-test'
ext.pluginsCount = 0
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
integTest {
cluster {
plugin subproj.path
}
integTestCluster {
plugin subproj.path
}
pluginsCount += 1
}

View File

@ -20,8 +20,6 @@
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
integTest {
cluster {
setting 'script.max_compilations_per_minute', '1000'
}
integTestCluster {
setting 'script.max_compilations_per_minute', '1000'
}

View File

@ -24,47 +24,32 @@ import org.elasticsearch.gradle.test.NodeInfo
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
List<NodeInfo> oneNodes
task setupClusterOne(type: DefaultTask) {
mustRunAfter(precommit)
ClusterConfiguration configOne = new ClusterConfiguration(project)
configOne.clusterName = 'one'
configOne.setting('node.name', 'one')
oneNodes = ClusterFormationTasks.setup(project, setupClusterOne, configOne)
}
List<NodeInfo> twoNodes
task setupClusterTwo(type: DefaultTask) {
mustRunAfter(precommit)
ClusterConfiguration configTwo = new ClusterConfiguration(project)
configTwo.clusterName = 'two'
configTwo.setting('node.name', 'two')
twoNodes = ClusterFormationTasks.setup(project, setupClusterTwo, configTwo)
}
integTest {
dependsOn(setupClusterOne, setupClusterTwo)
cluster {
// tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied
// would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to
// ensure that the code that fixes this bug is exercised
setting 'http.port', '40200-40249'
setting 'transport.tcp.port', '40300-40349'
setting 'node.name', 'quest'
setting 'tribe.one.cluster.name', 'one'
setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'"
setting 'tribe.one.http.enabled', 'true'
setting 'tribe.one.http.port', '40250-40299'
setting 'tribe.one.transport.tcp.port', '40350-40399'
setting 'tribe.two.cluster.name', 'two'
setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'"
setting 'tribe.two.http.enabled', 'true'
setting 'tribe.two.http.port', '40250-40299'
setting 'tribe.two.transport.tcp.port', '40250-40399'
}
// need to kill the standalone nodes here
finalizedBy 'setupClusterOne#stop'
finalizedBy 'setupClusterTwo#stop'
ClusterConfiguration configOne = new ClusterConfiguration(project)
configOne.clusterName = 'one'
configOne.setting('node.name', 'one')
List<NodeInfo> oneNodes = ClusterFormationTasks.setup(project, 'clusterOne', integTestRunner, configOne)
ClusterConfiguration configTwo = new ClusterConfiguration(project)
configTwo.clusterName = 'two'
configTwo.setting('node.name', 'two')
List<NodeInfo> twoNodes = ClusterFormationTasks.setup(project, 'clusterTwo', integTestRunner, configTwo)
integTestCluster {
// tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied
// would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to
// ensure that the code that fixes this bug is exercised
setting 'http.port', '40200-40249'
setting 'transport.tcp.port', '40300-40349'
setting 'node.name', 'quest'
setting 'tribe.one.cluster.name', 'one'
setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'"
setting 'tribe.one.http.enabled', 'true'
setting 'tribe.one.http.port', '40250-40299'
setting 'tribe.one.transport.tcp.port', '40350-40399'
setting 'tribe.two.cluster.name', 'two'
setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'"
setting 'tribe.two.http.enabled', 'true'
setting 'tribe.two.http.port', '40250-40299'
setting 'tribe.two.transport.tcp.port', '40250-40399'
}