Partial Revert "Convert RunTask to use testclusters, remove ClusterFormationTasks (#47572)"
This reverts the removal of the ClusterFormationTasks from commit 36d018c909 so that they remain usable for a bit longer in the Hadoop build.
parent 7fddf198b7
commit 6c9305dc78
@@ -0,0 +1,267 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.test

import org.elasticsearch.gradle.Version
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.tasks.Input

/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {

    private final Project project

    @Input
    String distribution = 'default'

    @Input
    int numNodes = 1

    @Input
    int numBwcNodes = 0

    @Input
    Version bwcVersion = null

    @Input
    int httpPort = 0

    @Input
    int transportPort = 0

    /**
     * An override of the data directory. Input is the node number and output
     * is the override data directory.
     */
    @Input
    Closure<String> dataDir = null

    /** Optional override of the cluster name. */
    @Input
    String clusterName = null

    @Input
    boolean daemonize = true

    @Input
    boolean debug = false

    /**
     * Configuration of the setting {@code discovery.zen.minimum_master_nodes} on the nodes.
     * In case of more than one node, this defaults to the number of nodes
     */
    @Input
    Closure<Integer> minimumMasterNodes = {
        if (bwcVersion != null && bwcVersion.before("6.5.0")) {
            return numNodes > 1 ? numNodes : -1
        } else {
            return numNodes > 1 ? numNodes.intdiv(2) + 1 : -1
        }
    }

    /**
     * Whether the initial_master_nodes setting should be automatically derived from the nodes
     * in the cluster. Only takes effect if all nodes in the cluster understand this setting
     * and the discovery type is not explicitly set.
     */
    @Input
    boolean autoSetInitialMasterNodes = true

    /**
     * Whether the file-based discovery provider should be automatically setup based on
     * the nodes in the cluster. Only takes effect if no other hosts provider is already
     * configured.
     */
    @Input
    boolean autoSetHostsProvider = true

    @Input
    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
        " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
        " " + System.getProperty('tests.jvm.argline', '')

    /**
     * Should the shared environment be cleaned on cluster startup? Defaults
     * to {@code true} so we run with a clean cluster, but some tests wish to
     * preserve snapshots between clusters so they set this to false.
     */
    @Input
    boolean cleanShared = true

    /**
     * A closure to call which returns the unicast host to connect to for cluster formation.
     *
     * This allows multi node clusters, or a new cluster to connect to an existing cluster.
     * The closure takes three arguments: the NodeInfo for the first node in the cluster,
     * the NodeInfo for the node currently being configured, and an AntBuilder which may be used
     * to wait on conditions before returning.
     */
    @Input
    Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant ->
        if (seedNode == node) {
            return null
        }
        ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond',
                timeoutproperty: "failed.${seedNode.transportPortsFile.path}") {
            resourceexists {
                file(file: seedNode.transportPortsFile.toString())
            }
        }
        if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) {
            throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " +
                "timed out waiting for it to be created after 40 seconds")
        }
        return seedNode.transportUri()
    }

    /**
     * A closure to call which returns a manually supplied list of unicast seed hosts.
     */
    @Input
    Closure<List<String>> otherUnicastHostAddresses = {
        Collections.emptyList()
    }

    /**
     * A closure to call before the cluster is considered ready. The closure is passed the node info,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
     * condition is for http on the http port.
     */
    @Input
    Closure waitCondition = { NodeInfo node, AntBuilder ant ->
        File tmpFile = new File(node.cwd, 'wait.success')
        String waitUrl = "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow"
        ant.echo(message: "==> [${new Date()}] checking health: ${waitUrl}",
                 level: 'info')
        // checking here for wait_for_nodes to be >= the number of nodes because it's possible
        // this cluster is attempting to connect to nodes created by another task (same cluster name),
        // so there will be more nodes in that case in the cluster state
        ant.get(src: waitUrl,
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                retries: 10)
        return tmpFile.exists()
    }

    /**
     * The maximum number of seconds to wait for nodes to complete startup, which includes writing
     * the ports files for the transports and the pid file. This wait time occurs before the wait
     * condition is executed.
     */
    @Input
    int nodeStartupWaitSeconds = 30

    public ClusterConfiguration(Project project) {
        this.project = project
    }

    // **Note** for systemProperties, settings, keystoreFiles etc:
    // value could be a GString that is evaluated to just a String
    // there are cases when value depends on task that is not executed yet on configuration stage
    Map<String, Object> systemProperties = new HashMap<>()

    Map<String, Object> environmentVariables = new HashMap<>()

    Map<String, Object> settings = new HashMap<>()

    Map<String, String> keystoreSettings = new HashMap<>()

    Map<String, Object> keystoreFiles = new HashMap<>()

    // map from destination path, to source file
    Map<String, Object> extraConfigFiles = new HashMap<>()

    LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()

    List<Project> modules = new ArrayList<>()

    LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()

    List<Object> dependencies = new ArrayList<>()

    @Input
    void systemProperty(String property, Object value) {
        systemProperties.put(property, value)
    }

    @Input
    void environment(String variable, Object value) {
        environmentVariables.put(variable, value)
    }

    @Input
    void setting(String name, Object value) {
        settings.put(name, value)
    }

    @Input
    void keystoreSetting(String name, String value) {
        keystoreSettings.put(name, value)
    }

    /**
     * Adds a file to the keystore. The name is the secure setting name, and the sourceFile
     * is anything accepted by project.file()
     */
    @Input
    void keystoreFile(String name, Object sourceFile) {
        keystoreFiles.put(name, sourceFile)
    }

    @Input
    void plugin(String path) {
        Project pluginProject = project.project(path)
        plugins.put(pluginProject.name, pluginProject)
    }

    @Input
    void mavenPlugin(String name, String mavenCoords) {
        plugins.put(name, mavenCoords)
    }

    /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
    @Input
    void module(Project moduleProject) {
        modules.add(moduleProject)
    }

    @Input
    void setupCommand(String name, Object... args) {
        setupCommands.put(name, args)
    }

    /**
     * Add an extra configuration file. The path is relative to the config dir, and the sourceFile
     * is anything accepted by project.file()
     */
    @Input
    void extraConfigFile(String path, Object sourceFile) {
        if (path == 'elasticsearch.yml') {
            throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }')
        }
        extraConfigFiles.put(path, sourceFile)
    }

    /** Add dependencies that must be run before the first task setting up the cluster. */
    @Input
    void dependsOn(Object... deps) {
        dependencies.addAll(deps)
    }
}
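For reference, build scripts typically drove this ClusterConfiguration through the legacy test-cluster DSL rather than constructing it directly. The following is a minimal, hypothetical sketch, not part of this commit: the integTestCluster block name, setting values, and file paths are assumptions for illustration, while the properties and methods it calls (distribution, numNodes, setting, systemProperty, extraConfigFile, setupCommand) are the @Input members defined above.

integTestCluster {
    // All of these map onto ClusterConfiguration fields and methods shown above.
    distribution = 'default'
    numNodes = 2
    setting 'node.attr.testattr', 'test'                 // stored in the settings map, ends up in elasticsearch.yml
    systemProperty 'tests.example.flag', 'true'          // hypothetical property name, for illustration only
    extraConfigFile 'hdfs-site.xml', project.file('src/test/resources/hdfs-site.xml')   // illustrative path
    setupCommand 'installExamplePlugin', 'bin/elasticsearch-plugin', 'install', 'file:example.zip'   // hypothetical
}

Values added this way are kept as Objects on purpose: as the note above the maps explains, they may be GStrings that depend on tasks which have not executed yet at configuration time and so must be resolved late.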
File diff suppressed because it is too large
@@ -0,0 +1,297 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.test

import com.sun.jna.Native
import com.sun.jna.WString
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.Project

import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.Paths

/**
 * A container for the files and configuration associated with a single node in a test cluster.
 */
class NodeInfo {
    /** Gradle project this node is part of */
    Project project

    /** common configuration for all nodes, including this one */
    ClusterConfiguration config

    /** node number within the cluster, for creating unique names and paths */
    int nodeNum

    /** name of the cluster this node is part of */
    String clusterName

    /** root directory all node files and operations happen under */
    File baseDir

    /** shared data directory all nodes share */
    File sharedDir

    /** the pid file the node will use */
    File pidFile

    /** a file written by elasticsearch containing the ports of each bound address for http */
    File httpPortsFile

    /** a file written by elasticsearch containing the ports of each bound address for transport */
    File transportPortsFile

    /** elasticsearch home dir */
    File homeDir

    /** config directory */
    File pathConf

    /** data directory (as an Object, to allow lazy evaluation) */
    Object dataDir

    /** THE config file */
    File configFile

    /** working directory for the node process */
    File cwd

    /** file that, if it exists, indicates the node failed to start */
    File failedMarker

    /** stdout/stderr log of the elasticsearch process for this node */
    File startLog

    /** directory to install plugins from */
    File pluginsTmpDir

    /** Major version of java this node runs with, or {@code null} if using the runtime java version */
    Integer javaVersion

    /** environment variables to start the node with */
    Map<String, String> env

    /** arguments to start the node with */
    List<String> args

    /** Executable to run the bin/elasticsearch with, either cmd or sh */
    String executable

    /** Path to the elasticsearch start script */
    private Object esScript

    /** script to run when running in the background */
    private File wrapperScript

    /** buffer for ant output when starting this node */
    ByteArrayOutputStream buffer = new ByteArrayOutputStream()

    /** the version of elasticsearch that this node runs */
    Version nodeVersion

    /** true if the node is not the current version */
    boolean isBwcNode

    /** Holds node configuration for part of a test cluster. */
    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
        this.config = config
        this.nodeNum = nodeNum
        this.project = project
        this.sharedDir = sharedDir
        if (config.clusterName != null) {
            clusterName = config.clusterName
        } else {
            clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
        }
        baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
        pidFile = new File(baseDir, 'es.pid')
        this.nodeVersion = Version.fromString(nodeVersion)
        this.isBwcNode = this.nodeVersion.before(VersionProperties.elasticsearch)
        homeDir = new File(baseDir, "elasticsearch-${nodeVersion}")
        pathConf = new File(homeDir, 'config')
        if (config.dataDir != null) {
            dataDir = "${config.dataDir(nodeNum)}"
        } else {
            dataDir = new File(homeDir, "data")
        }
        configFile = new File(pathConf, 'elasticsearch.yml')
        // even for rpm/deb, the logs are under home because we don't start with real services
        File logsDir = new File(homeDir, 'logs')
        httpPortsFile = new File(logsDir, 'http.ports')
        transportPortsFile = new File(logsDir, 'transport.ports')
        cwd = new File(baseDir, "cwd")
        failedMarker = new File(cwd, 'run.failed')
        startLog = new File(cwd, 'run.log')
        pluginsTmpDir = new File(baseDir, "plugins tmp")

        args = []
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            executable = 'cmd'
            args.add('/C')
            args.add('"') // quote the entire command
            wrapperScript = new File(cwd, "run.bat")
            /*
             * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
             * getting the short name requiring the path to already exist.
             */
            esScript = "${-> binPath().resolve('elasticsearch.bat').toString()}"
        } else {
            executable = 'bash'
            wrapperScript = new File(cwd, "run")
            esScript = binPath().resolve('elasticsearch')
        }
        if (config.daemonize) {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                /*
                 * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
                 * getting the short name requiring the path to already exist.
                 */
                args.add("${-> getShortPathName(wrapperScript.toString())}")
            } else {
                args.add("${wrapperScript}")
            }
        } else {
            args.add("${esScript}")
        }

        if (this.nodeVersion.before("6.2.0")) {
            javaVersion = 8
        } else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) {
            javaVersion = 9
        } else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) {
            javaVersion = 10
        }

        args.addAll("-E", "node.portsfile=true")
        env = [:]
        env.putAll(config.environmentVariables)
        for (Map.Entry<String, String> property : System.properties.entrySet()) {
            if (property.key.startsWith('tests.es.')) {
                args.add("-E")
                args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
            }
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            /*
             * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
             * getting the short name requiring the path to already exist.
             */
            env.put('ES_PATH_CONF', "${-> getShortPathName(pathConf.toString())}")
        } else {
            env.put('ES_PATH_CONF', pathConf)
        }
        if (!System.properties.containsKey("tests.es.path.data")) {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                /*
                 * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
                 * getting the short name requiring the path to already exist. This one is extra tricky because usually we rely on the node
                 * creating its data directory on startup but we simply can not do that here because getting the short path name requires
                 * the directory to already exist. Therefore, we create this directory immediately before getting the short name.
                 */
                args.addAll("-E", "path.data=${-> Files.createDirectories(Paths.get(dataDir.toString())); getShortPathName(dataDir.toString())}")
            } else {
                args.addAll("-E", "path.data=${-> dataDir.toString()}")
            }
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            args.add('"') // end the entire command, quoted
        }
    }

    Path binPath() {
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            return Paths.get(getShortPathName(new File(homeDir, 'bin').toString()))
        } else {
            return Paths.get(new File(homeDir, 'bin').toURI())
        }
    }

    static String getShortPathName(String path) {
        assert Os.isFamily(Os.FAMILY_WINDOWS)
        final WString longPath = new WString("\\\\?\\" + path)
        // first we get the length of the buffer needed
        final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0)
        if (length == 0) {
            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
        }
        final char[] shortPath = new char[length]
        // knowing the length of the buffer, now we get the short name
        if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) == 0) {
            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
        }
        // we have to strip the \\?\ away from the path for cmd.exe
        return Native.toString(shortPath).substring(4)
    }

    /** Returns debug string for the command that started this node. */
    String getCommandString() {
        String esCommandString = "\nNode ${nodeNum} configuration:\n"
        esCommandString += "|-----------------------------------------\n"
        esCommandString += "| cwd: ${cwd}\n"
        esCommandString += "| command: ${executable} ${args.join(' ')}\n"
        esCommandString += '| environment:\n'
        env.each { k, v -> esCommandString += "| ${k}: ${v}\n" }
        if (config.daemonize) {
            esCommandString += "|\n| [${wrapperScript.name}]\n"
            wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"})
        }
        esCommandString += '|\n| [elasticsearch.yml]\n'
        configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" })
        esCommandString += "|-----------------------------------------"
        return esCommandString
    }

    void writeWrapperScript() {
        String argsPasser = '"$@"'
        String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            argsPasser = '%*'
            exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
        }
        wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

    /** Returns an address and port suitable for a uri to connect to this node over http */
    String httpUri() {
        return httpPortsFile.readLines("UTF-8").get(0)
    }

    /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
    String transportUri() {
        return transportPortsFile.readLines("UTF-8").get(0)
    }

    /** Returns the file which contains the transport protocol ports for this node */
    File getTransportPortsFile() {
        return transportPortsFile
    }

    /** Returns the data directory for this node */
    File getDataDir() {
        if (!(dataDir instanceof File)) {
            return new File(dataDir)
        }
        return dataDir
    }
}
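To make the relationship between the two classes concrete, here is a minimal, hypothetical sketch, not taken from this commit, of how cluster-formation code builds one NodeInfo per configured node. It assumes it runs inside a Gradle build script where a project instance is in scope and these buildSrc classes are on the classpath; the 'integTest' prefix and shared directory path are illustrative only.

import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.ClusterConfiguration
import org.elasticsearch.gradle.test.NodeInfo

// Hypothetical wiring, for illustration only.
ClusterConfiguration config = new ClusterConfiguration(project)
config.numNodes = 2
File sharedDir = new File(project.buildDir, 'cluster/shared')
List<NodeInfo> nodes = (0..<config.numNodes).collect { int n ->
    new NodeInfo(config, n, project, 'integTest', VersionProperties.elasticsearch, sharedDir)
}
// Each NodeInfo has precomputed where that node's files will live at runtime.
nodes.each { node ->
    println "node ${node.nodeNum}: home=${node.homeDir}, config=${node.configFile}, data=${node.getDataDir()}"
}
// Once a node has started and written logs/transport.ports, other nodes can seed from it,
// which is what ClusterConfiguration.unicastTransportUri relies on:
// String seedUri = nodes[0].transportUri()

Note the heavy use of lazy ${-> ...} GStrings in the constructor above: on Windows, short path names can only be resolved once the directories exist, so path resolution is deferred until the node is actually started.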