In the rolling upgrades tests, we do not want to stop nodes
automatically between tasks, as we want some of the nodes from
the previous task to continue running in the next task. This
commit enables a cluster configuration setting to not stop
nodes automatically after a task runs, but instead the creator
of the test task must stop the running nodes explicitly in a
cleanup phase.
This commit is contained in:
Ali Beyad 2016-09-16 10:14:51 -04:00
parent ba072ec18e
commit 56f97500c6
3 changed files with 21 additions and 5 deletions

View File

@@ -62,6 +62,14 @@ class ClusterConfiguration {
@Input
boolean debug = false
/**
* Whether to stop the nodes in the cluster upon task completion. The only reason to
* set this to false is if you want the nodes in the cluster to hang around and you
* will clean them up later by calling the `taskName#nodeX.stop` task explicitly.
*/
@Input
boolean stopNodesOnCompletion = true
@Input
String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
@@ -95,8 +103,11 @@ class ClusterConfiguration {
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
ant.echo("==> Current time: " + new Date());
ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes>=${numNodes}")
// checking here for wait_for_nodes to be >= the number of nodes because it's possible
// this cluster is attempting to connect to nodes created by another task (same cluster name),
// so there will be more nodes in that case in the cluster state
ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes>=${numNodes}",
dest: tmpFile.toString(),
ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
retries: 10)

View File

@@ -173,9 +173,9 @@ class ClusterFormationTasks {
Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
if (node.config.daemonize) {
Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
if (node.config.daemonize && node.config.stopNodesOnCompletion) {
// if we are running in the background, make sure to stop the server when the task completes
Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
task.finalizedBy(stop)
}
return start

View File

@@ -33,6 +33,7 @@ task oldClusterTest(type: RestIntegTestTask) {
//numBwcNodes = 2
numNodes = 2
clusterName = 'rolling-upgrade'
stopNodesOnCompletion = false
}
systemProperty 'tests.rest.suite', 'old_cluster'
systemProperty 'tests.rest.preserve_indices', 'true'
@@ -45,6 +46,7 @@ task mixedClusterTest(type: RestIntegTestTask) {
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
dataDir = "${-> oldClusterTest.nodes[1].dataDir}"
stopNodesOnCompletion = false
}
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.rest.preserve_indices', 'true'
@@ -61,7 +63,10 @@ task upgradedClusterTest(type: RestIntegTestTask) {
systemProperty 'tests.rest.suite', 'upgraded_cluster'
}
// only need to kill the mixed cluster test's node here because we explicitly told it to not stop nodes upon completion
upgradedClusterTest.finalizedBy 'mixedClusterTest#stop'
task integTest {
dependsOn = [oldClusterTest, mixedClusterTest, upgradedClusterTest]
dependsOn = [upgradedClusterTest]
}
check.dependsOn(integTest)