From 6c9305dc789af6d150582e9e526a0557ccc0c2fe Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Tue, 15 Oct 2019 16:43:46 +0300
Subject: [PATCH] Partial Revert "Convert RunTask to use testclusers, remove
 ClusterFormationTasks (#47572)"

This reverts the removal of the ClusterFormationTasks from commit
36d018c90907d1f7da24bd8449248e81b17c2530 so they remain usable for a bit
longer in the Hadoop build.
---
 .../gradle/test/ClusterConfiguration.groovy   |  267 +++++
 .../gradle/test/ClusterFormationTasks.groovy  | 1019 +++++++++++++++++
 .../elasticsearch/gradle/test/NodeInfo.groovy |  297 +++++
 3 files changed, 1583 insertions(+)
 create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
 create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
 create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
new file mode 100644
index 00000000000..0053c0a40b4
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.test
+
+import org.elasticsearch.gradle.Version
+import org.gradle.api.GradleException
+import org.gradle.api.Project
+import org.gradle.api.tasks.Input
+
+/** Configuration for an elasticsearch cluster, used for integration tests. */
+class ClusterConfiguration {
+
+    private final Project project
+
+    @Input
+    String distribution = 'default'
+
+    @Input
+    int numNodes = 1
+
+    @Input
+    int numBwcNodes = 0
+
+    @Input
+    Version bwcVersion = null
+
+    @Input
+    int httpPort = 0
+
+    @Input
+    int transportPort = 0
+
+    /**
+     * An override of the data directory. Input is the node number and output
+     * is the override data directory.
+     */
+    @Input
+    Closure<String> dataDir = null
+
+    /** Optional override of the cluster name. */
+    @Input
+    String clusterName = null
+
+    @Input
+    boolean daemonize = true
+
+    @Input
+    boolean debug = false
+
+    /**
+     * Configuration of the setting {@code discovery.zen.minimum_master_nodes} on the nodes.
+     * In case of more than one node, this defaults to the number of nodes for BWC versions
+     * before 6.5.0, and to {@code (numNodes / 2) + 1} otherwise.
+     */
+    @Input
+    Closure<Integer> minimumMasterNodes = {
+        if (bwcVersion != null && bwcVersion.before("6.5.0")) {
+            return numNodes > 1 ? numNodes : -1
+        } else {
+            return numNodes > 1 ? numNodes.intdiv(2) + 1 : -1
+        }
+    }
+
+    /**
+     * Whether the initial_master_nodes setting should be automatically derived from the nodes
+     * in the cluster. Only takes effect if all nodes in the cluster understand this setting
+     * and the discovery type is not explicitly set.
+     */
+    @Input
+    boolean autoSetInitialMasterNodes = true
+
+    /**
+     * Whether the file-based discovery provider should be automatically set up based on
+     * the nodes in the cluster. Only takes effect if no other hosts provider is already
+     * configured.
+     */
+    @Input
+    boolean autoSetHostsProvider = true
+
+    @Input
+    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
+        " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
+        " " + System.getProperty('tests.jvm.argline', '')
+
+    /**
+     * Should the shared environment be cleaned on cluster startup? Defaults
+     * to {@code true} so we run with a clean cluster, but tests that wish to
+     * preserve snapshots between clusters set this to {@code false}.
+     */
+    @Input
+    boolean cleanShared = true
+
+    /**
+     * A closure to call which returns the unicast host to connect to for cluster formation.
+     *
+     * This allows multi-node clusters, or a new cluster to connect to an existing cluster.
+     * The closure takes three arguments: the NodeInfo for the first node in the cluster,
+     * the NodeInfo for the node currently being configured, and an AntBuilder which may be
+     * used to wait on conditions before returning.
+     */
+    @Input
+    Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant ->
+        if (seedNode == node) {
+            return null
+        }
+        ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond',
+                timeoutproperty: "failed.${seedNode.transportPortsFile.path}") {
+            resourceexists {
+                file(file: seedNode.transportPortsFile.toString())
+            }
+        }
+        if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) {
+            throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " +
+                "timed out waiting for it to be created after 40 seconds")
+        }
+        return seedNode.transportUri()
+    }
+
+    /**
+     * A closure to call which returns a manually supplied list of unicast seed hosts.
+     */
+    @Input
+    Closure<List<String>> otherUnicastHostAddresses = {
+        Collections.emptyList()
+    }
+
+    /**
+     * A closure to call before the cluster is considered ready. The closure is passed the node info,
+     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
+     * condition is for http on the http port.
+     */
+    @Input
+    Closure waitCondition = { NodeInfo node, AntBuilder ant ->
+        File tmpFile = new File(node.cwd, 'wait.success')
+        String waitUrl = "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow"
+        ant.echo(message: "==> [${new Date()}] checking health: ${waitUrl}",
+                level: 'info')
+        // checking here for wait_for_nodes to be >= the number of nodes because it's possible
+        // this cluster is attempting to connect to nodes created by another task (same cluster name),
+        // in which case the cluster state will contain more nodes than this task started
+        ant.get(src: waitUrl,
+                dest: tmpFile.toString(),
+                ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
+                retries: 10)
+        return tmpFile.exists()
+    }
+
+    /**
+     * The maximum number of seconds to wait for nodes to complete startup, which includes writing
+     * the ports files for the transports and the pid file. This wait time occurs before the wait
+     * condition is executed.
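+     *
+     * For example (illustrative only; the name of the cluster block depends on how the
+     * consuming build wires this configuration in), a slow-starting cluster might use:
+     * <pre>
+     * integTestCluster {
+     *     nodeStartupWaitSeconds = 120
+     * }
+     * </pre>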
+ */ + @Input + int nodeStartupWaitSeconds = 30 + + public ClusterConfiguration(Project project) { + this.project = project + } + + // **Note** for systemProperties, settings, keystoreFiles etc: + // value could be a GString that is evaluated to just a String + // there are cases when value depends on task that is not executed yet on configuration stage + Map systemProperties = new HashMap<>() + + Map environmentVariables = new HashMap<>() + + Map settings = new HashMap<>() + + Map keystoreSettings = new HashMap<>() + + Map keystoreFiles = new HashMap<>() + + // map from destination path, to source file + Map extraConfigFiles = new HashMap<>() + + LinkedHashMap plugins = new LinkedHashMap<>() + + List modules = new ArrayList<>() + + LinkedHashMap setupCommands = new LinkedHashMap<>() + + List dependencies = new ArrayList<>() + + @Input + void systemProperty(String property, Object value) { + systemProperties.put(property, value) + } + + @Input + void environment(String variable, Object value) { + environmentVariables.put(variable, value) + } + + @Input + void setting(String name, Object value) { + settings.put(name, value) + } + + @Input + void keystoreSetting(String name, String value) { + keystoreSettings.put(name, value) + } + + /** + * Adds a file to the keystore. The name is the secure setting name, and the sourceFile + * is anything accepted by project.file() + */ + @Input + void keystoreFile(String name, Object sourceFile) { + keystoreFiles.put(name, sourceFile) + } + + @Input + void plugin(String path) { + Project pluginProject = project.project(path) + plugins.put(pluginProject.name, pluginProject) + } + + @Input + void mavenPlugin(String name, String mavenCoords) { + plugins.put(name, mavenCoords) + } + + /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */ + @Input + void module(Project moduleProject) { + modules.add(moduleProject) + } + + @Input + void setupCommand(String name, Object... args) { + setupCommands.put(name, args) + } + + /** + * Add an extra configuration file. The path is relative to the config dir, and the sourceFile + * is anything accepted by project.file() + */ + @Input + void extraConfigFile(String path, Object sourceFile) { + if (path == 'elasticsearch.yml') { + throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }') + } + extraConfigFiles.put(path, sourceFile) + } + + /** Add dependencies that must be run before the first task setting up the cluster. */ + @Input + void dependsOn(Object... deps) { + dependencies.addAll(deps) + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy new file mode 100644 index 00000000000..4327cbea767 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -0,0 +1,1019 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.test + +import org.apache.tools.ant.DefaultLogger +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.BwcVersions +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.plugin.PluginPropertiesExtension +import org.gradle.api.AntBuilder +import org.gradle.api.DefaultTask +import org.gradle.api.GradleException +import org.gradle.api.Project +import org.gradle.api.Task +import org.gradle.api.artifacts.Configuration +import org.gradle.api.artifacts.Dependency +import org.gradle.api.file.FileCollection +import org.gradle.api.logging.Logger +import org.gradle.api.tasks.Copy +import org.gradle.api.tasks.Delete +import org.gradle.api.tasks.Exec +import org.gradle.internal.jvm.Jvm + +import java.nio.charset.StandardCharsets +import java.nio.file.Paths +import java.util.concurrent.TimeUnit +import java.util.stream.Collectors + +/** + * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. + */ +class ClusterFormationTasks { + + /** + * Adds dependent tasks to the given task to start and stop a cluster with the given configuration. + * + * Returns a list of NodeInfo objects for each node in the cluster. + */ + static List setup(Project project, String prefix, Task runner, ClusterConfiguration config) { + File sharedDir = new File(project.buildDir, "cluster/shared") + Object startDependencies = config.dependencies + /* First, if we want a clean environment, we remove everything in the + * shared cluster directory to ensure there are no leftovers in repos + * or anything in theory this should not be necessary but repositories + * are only deleted in the cluster-state and not on-disk such that + * snapshots survive failures / test runs and there is no simple way + * today to fix that. */ + if (config.cleanShared) { + Task cleanup = project.tasks.create( + name: "${prefix}#prepareCluster.cleanShared", + type: Delete, + dependsOn: startDependencies) { + delete sharedDir + doLast { + sharedDir.mkdirs() + } + } + startDependencies = cleanup + } + List startTasks = [] + List nodes = [] + if (config.numNodes < config.numBwcNodes) { + throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]") + } + if (config.numBwcNodes > 0 && config.bwcVersion == null) { + throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0") + } + // this is our current version distribution configuration we use for all kinds of REST tests etc. 
+ Configuration currentDistro = project.configurations.create("${prefix}_elasticsearchDistro") + Configuration bwcDistro = project.configurations.create("${prefix}_elasticsearchBwcDistro") + Configuration bwcPlugins = project.configurations.create("${prefix}_elasticsearchBwcPlugins") + if (System.getProperty('tests.distribution', 'oss') == 'integ-test-zip') { + throw new Exception("tests.distribution=integ-test-zip is not supported") + } + configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) + boolean hasBwcNodes = config.numBwcNodes > 0 + if (hasBwcNodes) { + if (config.bwcVersion == null) { + throw new IllegalArgumentException("Must specify bwcVersion when numBwcNodes > 0") + } + // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version + // this version uses the same distribution etc. and only differs in the version we depend on. + // from here on everything else works the same as if it's the current version, we fetch the BWC version + // from mirrors using gradles built-in mechanism etc. + + configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion.toString()) + for (Map.Entry entry : config.plugins.entrySet()) { + configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion) + } + bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) + bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) + } + for (int i = 0; i < config.numNodes; i++) { + // we start N nodes and out of these N nodes there might be M bwc nodes. + // for each of those nodes we might have a different configuration + Configuration distro + String elasticsearchVersion + if (i < config.numBwcNodes) { + elasticsearchVersion = config.bwcVersion.toString() + if (project.bwcVersions.unreleased.contains(config.bwcVersion)) { + elasticsearchVersion += "-SNAPSHOT" + } + distro = bwcDistro + } else { + elasticsearchVersion = VersionProperties.elasticsearch + distro = currentDistro + } + NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) + nodes.add(node) + Closure writeConfigSetup + Object dependsOn + if (node.nodeVersion.onOrAfter("6.5.0")) { + writeConfigSetup = { Map esConfig -> + if (config.getAutoSetHostsProvider()) { + // Don't force discovery provider if one is set by the test cluster specs already + final String seedProvidersSettingName = + node.nodeVersion.onOrAfter("7.0.0") ? "discovery.seed_providers" : "discovery.zen.hosts_provider"; + if (esConfig.containsKey(seedProvidersSettingName) == false) { + esConfig[seedProvidersSettingName] = 'file' + } + esConfig[node.nodeVersion.onOrAfter("7.0.0") ? "discovery.seed_hosts" : "discovery.zen.ping.unicast.hosts"] = [] + } + boolean supportsInitialMasterNodes = hasBwcNodes == false || config.bwcVersion.onOrAfter("7.0.0") + if (esConfig['discovery.type'] == null && config.getAutoSetInitialMasterNodes() && supportsInitialMasterNodes) { + esConfig['cluster.initial_master_nodes'] = nodes.stream().map({ n -> + if (n.config.settings['node.name'] == null) { + return "node-" + n.nodeNum + } else { + return n.config.settings['node.name'] + } + }).collect(Collectors.toList()) + } + esConfig + } + dependsOn = startDependencies + } else { + dependsOn = startTasks.empty ? 
startDependencies : startTasks.get(0) + writeConfigSetup = { Map esConfig -> + String unicastTransportUri = node.config.unicastTransportUri(nodes.get(0), node, project.createAntBuilder()) + if (unicastTransportUri == null) { + esConfig['discovery.zen.ping.unicast.hosts'] = [] + } else { + esConfig['discovery.zen.ping.unicast.hosts'] = "\"${unicastTransportUri}\"" + } + esConfig + } + } + startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, writeConfigSetup)) + } + + Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks, config.nodeStartupWaitSeconds) + runner.dependsOn(wait) + + return nodes + } + + /** Adds a dependency on the given distribution */ + static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { + boolean internalBuild = project.hasProperty('bwcVersions') + if (distro.equals("integ-test-zip")) { + // short circuit integ test so it doesn't complicate the rest of the distribution setup below + if (internalBuild) { + project.dependencies.add( + configuration.name, + project.dependencies.project(path: ":distribution", configuration: 'integ-test-zip') + ) + } else { + project.dependencies.add( + configuration.name, + "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${elasticsearchVersion}@zip" + ) + } + return + } + // TEMP HACK + // The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated. + if (distro.equals('oss-zip')) { + distro = 'oss' + } + if (distro.equals('zip')) { + distro = 'default' + } + // END TEMP HACK + if (['oss', 'default'].contains(distro) == false) { + throw new GradleException("Unknown distribution: ${distro} in project ${project.path}") + } + Version version = Version.fromString(elasticsearchVersion) + String os = getOs() + String classifier = "-${os}-x86_64" + String packaging = os.equals('windows') ? 'zip' : 'tar.gz' + String artifactName = 'elasticsearch' + if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { + artifactName += '-oss' + } + Object dependency + String snapshotProject = "${os}-${os.equals('windows') ? 'zip' : 'tar'}" + if (version.before("7.0.0")) { + snapshotProject = "zip" + packaging = "zip" + } + if (distro.equals("oss")) { + snapshotProject = "oss-" + snapshotProject + } + + BwcVersions.UnreleasedVersionInfo unreleasedInfo = null + + if (project.hasProperty('bwcVersions')) { + // NOTE: leniency is needed for external plugin authors using build-tools. maybe build the version compat info into build-tools? 
+ unreleasedInfo = project.bwcVersions.unreleasedInfo(version) + } + if (unreleasedInfo != null) { + dependency = project.dependencies.project( + path: unreleasedInfo.gradleProjectPath, configuration: snapshotProject + ) + } else if (internalBuild && elasticsearchVersion.equals(VersionProperties.elasticsearch)) { + dependency = project.dependencies.project(path: ":distribution:archives:${snapshotProject}") + } else { + if (version.before('7.0.0')) { + classifier = "" // for bwc, before we had classifiers + } + // group does not matter as it is not used when we pull from the ivy repo that points to the download service + dependency = "dnm:${artifactName}:${elasticsearchVersion}${classifier}@${packaging}" + } + project.dependencies.add(configuration.name, dependency) + } + + /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ + static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) { + if (plugin instanceof Project) { + Project pluginProject = (Project)plugin + verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject) + final String pluginName = findPluginName(pluginProject) + project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") + } else { + project.dependencies.add(configuration.name, "${plugin}@zip") + } + } + + /** + * Adds dependent tasks to start an elasticsearch cluster before the given task is executed, + * and stop it after it has finished executing. + * + * The setup of the cluster involves the following: + *
+     * <ol>
+     *   <li>Cleanup the extraction directory</li>
+     *   <li>Extract a fresh copy of elasticsearch</li>
+     *   <li>Write an elasticsearch.yml config file</li>
+     *   <li>Copy plugins that will be installed to a temporary dir (which contains spaces)</li>
+     *   <li>Install plugins</li>
+     *   <li>Run additional setup commands</li>
+     *   <li>Start elasticsearch</li>
+     * </ol>
+ * + * @return a task which starts the node. + */ + static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config, + Configuration distribution, Closure writeConfig) { + + // tasks are chained so their execution order is maintained + Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { + delete node.homeDir + delete node.cwd + } + setup = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: setup) { + doLast { + node.cwd.mkdirs() + } + outputs.dir node.cwd + } + setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) + setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) + setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution, config.distribution) + setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, writeConfig) + setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) + setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) + setup = configureAddKeystoreFileTasks(prefix, project, setup, node) + + if (node.config.plugins.isEmpty() == false) { + if (node.nodeVersion == Version.fromString(VersionProperties.elasticsearch)) { + setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix) + } else { + setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix) + } + } + + // install modules + for (Project module : node.config.modules) { + String actionName = pluginTaskName('install', module.name, 'Module') + setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module) + } + + // install plugins + for (String pluginName : node.config.plugins.keySet()) { + String actionName = pluginTaskName('install', pluginName, 'Plugin') + setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix) + } + + // sets up any extra config files that need to be copied over to the ES instance; + // its run after plugins have been installed, as the extra config files may belong to plugins + setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) + + // extra setup commands + for (Map.Entry command : node.config.setupCommands.entrySet()) { + // the first argument is the actual script name, relative to home + Object[] args = command.getValue().clone() + final Object commandPath + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + /* + * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to + * getting the short name requiring the path to already exist. Note that we have to capture the value of arg[0] now + * otherwise we would stack overflow later since arg[0] is replaced below. 
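+                 *
+                 * As a sketch of the idiom (names are illustrative): a lazy GString such as
+                 *   def path = "${-> someFile.canonicalPath}"   // closure runs at toString() time
+                 * defers evaluation until the value is actually used, whereas
+                 *   def path = "${someFile.canonicalPath}"      // resolved immediately
+                 * is evaluated during the configuration phase.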
+ */ + String argsZero = args[0] + commandPath = "${-> Paths.get(NodeInfo.getShortPathName(node.homeDir.toString())).resolve(argsZero.toString()).toString()}" + } else { + commandPath = node.homeDir.toPath().resolve(args[0].toString()).toString() + } + args[0] = commandPath + setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args) + } + + Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node) + + if (node.config.daemonize) { + Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node) + // if we are running in the background, make sure to stop the server when the task completes + runner.finalizedBy(stop) + start.finalizedBy(stop) + for (Object dependency : config.dependencies) { + if (dependency instanceof Fixture) { + def depStop = ((Fixture)dependency).stopTask + runner.finalizedBy(depStop) + start.finalizedBy(depStop) + } + } + } + return start + } + + /** Adds a task to extract the elasticsearch distribution */ + static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, + Configuration configuration, String distribution) { + List extractDependsOn = [configuration, setup] + /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the + elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in + the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. + If it isn't then Bad Things(TM) will happen. */ + Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { + if (getOs().equals("windows") || distribution.equals("integ-test-zip") || node.nodeVersion.before("7.0.0")) { + from { + project.zipTree(configuration.singleFile) + } + } else { + // macos and linux use tar + from { + project.tarTree(project.resources.gzip(configuration.singleFile)) + } + } + into node.baseDir + } + + return extract + } + + /** Adds a task to write elasticsearch.yml for the given node configuration */ + static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, Closure configFilter) { + Map esConfig = [ + 'cluster.name' : node.clusterName, + 'node.name' : "node-" + node.nodeNum, + (node.nodeVersion.onOrAfter('7.4.0') ? 'node.pidfile' : 'pidfile') : node.pidFile, + 'path.repo' : "${node.sharedDir}/repo", + 'path.shared_data' : "${node.sharedDir}/", + // Define a node attribute so we can test that it exists + 'node.attr.testattr' : 'test', + // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master + 'discovery.initial_state_timeout' : '0s' + ] + int minimumMasterNodes = node.config.minimumMasterNodes.call() + if (node.nodeVersion.before("7.0.0") && minimumMasterNodes > 0) { + esConfig['discovery.zen.minimum_master_nodes'] = minimumMasterNodes + } + if (node.nodeVersion.before("7.0.0") && esConfig.containsKey('discovery.zen.master_election.wait_for_joins_timeout') == false) { + // If a node decides to become master based on partial information from the pinging, don't let it hang for 30 seconds to correct + // its mistake. Instead, only wait 5s to do another round of pinging. + // This is necessary since we use 30s as the default timeout in REST requests waiting for cluster formation + // so we need to bail quicker than the default 30s for the cluster to form in time. 
+ esConfig['discovery.zen.master_election.wait_for_joins_timeout'] = '5s' + } + esConfig['http.port'] = node.config.httpPort + if (node.nodeVersion.onOrAfter('6.7.0')) { + esConfig['transport.port'] = node.config.transportPort + } else { + esConfig['transport.tcp.port'] = node.config.transportPort + } + // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space + esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b' + esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b' + if (node.nodeVersion.major >= 6) { + esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b' + } + // increase script compilation limit since tests can rapid-fire script compilations + esConfig['script.max_compilations_rate'] = '2048/1m' + // Temporarily disable the real memory usage circuit breaker. It depends on real memory usage which we have no full control + // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client + // can retry on circuit breaking exceptions, we can revert again to the default configuration. + if (node.nodeVersion.major >= 7) { + esConfig['indices.breaker.total.use_real_memory'] = false + } + + Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) + writeConfig.doFirst { + for (Map.Entry setting : node.config.settings) { + if (setting.value == null) { + esConfig.remove(setting.key) + } else { + esConfig.put(setting.key, setting.value) + } + } + + esConfig = configFilter.call(esConfig) + File configFile = new File(node.pathConf, 'elasticsearch.yml') + logger.info("Configuring ${configFile}") + configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') + } + } + + /** Adds a task to create keystore */ + static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) { + if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) { + return setup + } else { + /* + * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to + * getting the short name requiring the path to already exist. + */ + final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" + return configureExecTask(name, project, setup, node, esKeystoreUtil, 'create') + } + } + + /** Adds tasks to add settings to the keystore */ + static Task configureAddKeystoreSettingTasks(String parent, Project project, Task setup, NodeInfo node) { + Map kvs = node.config.keystoreSettings + Task parentTask = setup + /* + * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting + * the short name requiring the path to already exist. 
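+         *
+         * For illustration (hypothetical key and value): a keystoreSettings entry
+         * 'foo.secure_setting' -> 'bar' yields an exec task named
+         * "<parent>#addToKeystore#foo.secure_setting" that runs
+         * `elasticsearch-keystore add foo.secure_setting -x` with 'bar' piped to its
+         * standard input.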
+ */ + final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" + for (Map.Entry entry in kvs) { + String key = entry.getKey() + String name = taskName(parent, node, 'addToKeystore#' + key) + Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add', key, '-x') + String settingsValue = entry.getValue() // eval this early otherwise it will not use the right value + t.doFirst { + standardInput = new ByteArrayInputStream(settingsValue.getBytes(StandardCharsets.UTF_8)) + } + parentTask = t + } + return parentTask + } + + /** Adds tasks to add files to the keystore */ + static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) { + Map kvs = node.config.keystoreFiles + if (kvs.isEmpty()) { + return setup + } + Task parentTask = setup + /* + * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting + * the short name requiring the path to already exist. + */ + final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" + for (Map.Entry entry in kvs) { + String key = entry.getKey() + String name = taskName(parent, node, 'addToKeystore#' + key) + String srcFileName = entry.getValue() + Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName) + t.doFirst { + File srcFile = project.file(srcFileName) + if (srcFile.isDirectory()) { + throw new GradleException("Source for keystoreFile must be a file: ${srcFile}") + } + if (srcFile.exists() == false) { + throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}") + } + } + parentTask = t + } + return parentTask + } + + static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) { + if (node.config.extraConfigFiles.isEmpty()) { + return setup + } + Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup) + File configDir = new File(node.homeDir, 'config') + copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it + for (Map.Entry extraConfigFile : node.config.extraConfigFiles.entrySet()) { + Object extraConfigFileValue = extraConfigFile.getValue() + copyConfig.doFirst { + // make sure the copy won't be a no-op or act on a directory + File srcConfigFile = project.file(extraConfigFileValue) + if (srcConfigFile.isDirectory()) { + throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}") + } + if (srcConfigFile.exists() == false) { + throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}") + } + } + File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey()) + // wrap source file in closure to delay resolution to execution time + copyConfig.from({ extraConfigFileValue }) { + // this must be in a closure so it is only applied to the single file specified in from above + into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile()) + rename { destConfigFile.name } + } + } + return copyConfig + } + + /** + * Adds a task to copy plugins to a temp dir, which they will later be installed from. + * + * For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied + * to the test resources for this project. 
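+     *
+     * For example, a plugin whose test resources contain a (hypothetical) spec file
+     * rest-api-spec/api/my_plugin.action.json gets that file copied into this project's
+     * test resources, so the project's REST tests can resolve the API spec.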
+ */ + static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { + Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) + + List pluginFiles = [] + for (Map.Entry plugin : node.config.plugins.entrySet()) { + + String configurationName = pluginConfigurationName(prefix, plugin.key) + Configuration configuration = project.configurations.findByName(configurationName) + if (configuration == null) { + configuration = project.configurations.create(configurationName) + } + + if (plugin.getValue() instanceof Project) { + Project pluginProject = plugin.getValue() + verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) + + project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip')) + setup.dependsOn(pluginProject.tasks.bundlePlugin) + + // also allow rest tests to use the rest spec from the plugin + String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec') + Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName) + for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) { + File restApiDir = new File(resourceDir, 'rest-api-spec/api') + if (restApiDir.exists() == false) continue + if (copyRestSpec == null) { + copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy) + copyPlugins.dependsOn(copyRestSpec) + copyRestSpec.into(project.sourceSets.test.output.resourcesDir) + } + copyRestSpec.from(resourceDir).include('rest-api-spec/api/**') + } + } else { + project.dependencies.add(configurationName, "${plugin.getValue()}@zip") + } + + + + pluginFiles.add(configuration) + } + + copyPlugins.into(node.pluginsTmpDir) + copyPlugins.from(pluginFiles) + return copyPlugins + } + + private static String pluginConfigurationName(final String prefix, final String name) { + return "_plugin_${prefix}_${name}".replace(':', '_') + } + + private static String pluginBwcConfigurationName(final String prefix, final String name) { + return "_plugin_bwc_${prefix}_${name}".replace(':', '_') + } + + /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */ + static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { + Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins") + for (Map.Entry plugin : node.config.plugins.entrySet()) { + String configurationName = pluginBwcConfigurationName(prefix, plugin.key) + Configuration configuration = project.configurations.findByName(configurationName) + if (configuration == null) { + configuration = project.configurations.create(configurationName) + } + + if (plugin.getValue() instanceof Project) { + Project pluginProject = plugin.getValue() + verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) + + final String depName = findPluginName(pluginProject) + + Dependency dep = bwcPlugins.dependencies.find { + it.name == depName + } + configuration.dependencies.add(dep) + } else { + project.dependencies.add(configurationName, "${plugin.getValue()}@zip") + } + } + + Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) { + from bwcPlugins + into node.pluginsTmpDir + } + return copyPlugins + } + + static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) { + if (node.config.distribution != 'integ-test-zip') { + 
project.logger.info("Not installing modules for $name, ${node.config.distribution} already has them")
+            return setup
+        }
+        if (module.plugins.hasPlugin(PluginBuildPlugin) == false) {
+            throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin")
+        }
+        Copy installModule = project.tasks.create(name, Copy.class)
+        installModule.dependsOn(setup)
+        installModule.dependsOn(module.tasks.bundlePlugin)
+        installModule.into(new File(node.homeDir, "modules/${module.name}"))
+        installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) })
+        return installModule
+    }
+
+    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) {
+        FileCollection pluginZip
+        if (node.nodeVersion != Version.fromString(VersionProperties.elasticsearch)) {
+            pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName))
+        } else {
+            pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName))
+        }
+        // delay reading the file location until execution time by wrapping in a closure within a GString
+        final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
+        /*
+         * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
+         * the short name requiring the path to already exist.
+         */
+        final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}"
+        final Object[] args = [esPluginUtil, 'install', '--batch', file]
+        return configureExecTask(name, project, setup, node, args)
+    }
+
+    /** Wrapper for a command line argument: surrounds the argument with double quotes if it contains a comma */
+    private static class EscapeCommaWrapper {
+
+        Object arg
+
+        public String toString() {
+            String s = arg.toString()
+
+            // Surround strings that contain a comma with double quotes
+            if (s.indexOf(',') != -1) {
+                return "\"${s}\""
+            }
+            return s
+        }
+    }
+
+    /** Adds a task to execute a command to help set up the cluster */
+    static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
+        return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
+            exec.workingDir node.cwd
+            if (useRuntimeJava(project, node)) {
+                exec.environment.put('JAVA_HOME', project.runtimeJavaHome)
+            } else {
+                // force JAVA_HOME to *not* be set
+                exec.environment.remove('JAVA_HOME')
+            }
+            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+                exec.executable 'cmd'
+                exec.args '/C', 'call'
+                // On Windows the comma character is considered a parameter separator:
+                // arguments are wrapped in an EscapeCommaWrapper that escapes commas
+                exec.args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
+            } else {
+                exec.commandLine execArgs
+            }
+        }
+    }
+
+    public static boolean useRuntimeJava(Project project, NodeInfo node) {
+        return (project.isRuntimeJavaHomeSet ||
+                (node.isBwcNode == false && node.nodeVersion.before(Version.fromString("7.0.0"))) ||
+                node.config.distribution == 'integ-test-zip')
+    }
+
+    /** Adds a task to start an elasticsearch node with the given configuration */
+    static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {
+        // this closure is converted into ant nodes by groovy's AntBuilder
+        Closure antRunner = { AntBuilder ant ->
+            ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true,
+                     dir: node.cwd,
taskname: 'elasticsearch') { + node.env.each { key, value -> env(key: key, value: value) } + if (useRuntimeJava(project, node)) { + env(key: 'JAVA_HOME', value: project.runtimeJavaHome) + } + node.args.each { arg(value: it) } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + // Having no TMP on Windows defaults to C:\Windows and permission errors + // Since we configure ant to run with a new environment above, we need to explicitly pass this + String tmp = System.getenv("TMP") + assert tmp != null + env(key: "TMP", value: tmp) + } + } + } + + // this closure is the actual code to run elasticsearch + Closure elasticsearchRunner = { + // Due to how ant exec works with the spawn option, we lose all stdout/stderr from the + // process executed. To work around this, when spawning, we wrap the elasticsearch start + // command inside another shell script, which simply internally redirects the output + // of the real elasticsearch script. This allows ant to keep the streams open with the + // dummy process, but us to have the output available if there is an error in the + // elasticsearch start script + if (node.config.daemonize) { + node.writeWrapperScript() + } + + node.getCommandString().eachLine { line -> logger.info(line) } + + if (logger.isInfoEnabled() || node.config.daemonize == false) { + runAntCommand(project, antRunner, System.out, System.err) + } else { + // buffer the output, we may not need to print it + PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8") + runAntCommand(project, antRunner, captureStream, captureStream) + } + } + + Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) + if (node.javaVersion != null) { + BuildPlugin.requireJavaHome(start, node.javaVersion) + } + start.doLast(elasticsearchRunner) + start.doFirst { + // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected + if (project.inFipsJvm){ + node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') + node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') + } + + // Configure ES JAVA OPTS - adds system properties, assertion flags, remote debug etc + List esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')] + String collectedSystemProperties = node.config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") + esJavaOpts.add(collectedSystemProperties) + esJavaOpts.add(node.config.jvmArgs) + if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { + // put the enable assertions options before other options to allow + // flexibility to disable assertions for specific packages or classes + // in the cluster-specific options + esJavaOpts.add("-ea") + esJavaOpts.add("-esa") + } + // we must add debug options inside the closure so the config is read at execution time, as + // gradle task options are not processed until the end of the configuration phase + if (node.config.debug) { + println 'Running elasticsearch in debug mode, suspending until connected on port 8000' + esJavaOpts.add('-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000') + } + node.env['ES_JAVA_OPTS'] = esJavaOpts.join(" ") + + // + project.logger.info("Starting node in ${node.clusterName} distribution: ${node.config.distribution}") + } + return start + } + + static Task configureWaitTask(String name, Project project, List nodes, List startTasks, int waitSeconds) { + Task wait = project.tasks.create(name: name, dependsOn: startTasks) + wait.doLast { + + Collection unicastHosts = new 
HashSet<>() + nodes.forEach { node -> + unicastHosts.addAll(node.config.otherUnicastHostAddresses.call()) + String unicastHost = node.config.unicastTransportUri(node, null, project.createAntBuilder()) + if (unicastHost != null) { + unicastHosts.add(unicastHost) + } + } + String unicastHostsTxt = String.join("\n", unicastHosts) + nodes.forEach { node -> + node.pathConf.toPath().resolve("unicast_hosts.txt").setText(unicastHostsTxt) + } + + ant.waitfor(maxwait: "${waitSeconds}", maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { + or { + for (NodeInfo node : nodes) { + resourceexists { + file(file: node.failedMarker.toString()) + } + } + and { + for (NodeInfo node : nodes) { + resourceexists { + file(file: node.pidFile.toString()) + } + resourceexists { + file(file: node.httpPortsFile.toString()) + } + resourceexists { + file(file: node.transportPortsFile.toString()) + } + } + } + } + } + if (ant.properties.containsKey("failed${name}".toString())) { + waitFailed(project, nodes, logger, "Failed to start elasticsearch: timed out after ${waitSeconds} seconds") + } + + boolean anyNodeFailed = false + for (NodeInfo node : nodes) { + if (node.failedMarker.exists()) { + logger.error("Failed to start elasticsearch: ${node.failedMarker.toString()} exists") + anyNodeFailed = true + } + } + if (anyNodeFailed) { + waitFailed(project, nodes, logger, 'Failed to start elasticsearch') + } + + // make sure all files exist otherwise we haven't fully started up + boolean missingFile = false + for (NodeInfo node : nodes) { + missingFile |= node.pidFile.exists() == false + missingFile |= node.httpPortsFile.exists() == false + missingFile |= node.transportPortsFile.exists() == false + } + if (missingFile) { + waitFailed(project, nodes, logger, 'Elasticsearch did not complete startup in time allotted') + } + + // go through each node checking the wait condition + for (NodeInfo node : nodes) { + // first bind node info to the closure, then pass to the ant runner so we can get good logging + Closure antRunner = node.config.waitCondition.curry(node) + + boolean success + if (logger.isInfoEnabled()) { + success = runAntCommand(project, antRunner, System.out, System.err) + } else { + PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8") + success = runAntCommand(project, antRunner, captureStream, captureStream) + } + + if (success == false) { + waitFailed(project, nodes, logger, 'Elasticsearch cluster failed to pass wait condition') + } + } + } + return wait + } + + static void waitFailed(Project project, List nodes, Logger logger, String msg) { + for (NodeInfo node : nodes) { + if (logger.isInfoEnabled() == false) { + // We already log the command at info level. No need to do it twice. 
+ node.getCommandString().eachLine { line -> logger.error(line) } + } + logger.error("Node ${node.nodeNum} output:") + logger.error("|-----------------------------------------") + logger.error("| failure marker exists: ${node.failedMarker.exists()}") + logger.error("| pid file exists: ${node.pidFile.exists()}") + logger.error("| http ports file exists: ${node.httpPortsFile.exists()}") + logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}") + // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout) + logger.error("|\n| [ant output]") + node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } + // also dump the log file for the startup script (which will include ES logging output to stdout) + if (node.startLog.exists()) { + logger.error("|\n| [log]") + node.startLog.eachLine { line -> logger.error("| ${line}") } + } + if (node.pidFile.exists() && node.failedMarker.exists() == false && + (node.httpPortsFile.exists() == false || node.transportPortsFile.exists() == false)) { + logger.error("|\n| [jstack]") + String pid = node.pidFile.getText('UTF-8') + ByteArrayOutputStream output = new ByteArrayOutputStream() + project.exec { + commandLine = ["${project.runtimeJavaHome}/bin/jstack", pid] + standardOutput = output + } + output.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } + } + logger.error("|-----------------------------------------") + } + throw new GradleException(msg) + } + + /** Adds a task to check if the process with the given pidfile is actually elasticsearch */ + static Task configureCheckPreviousTask(String name, Project project, Object depends, NodeInfo node) { + return project.tasks.create(name: name, type: Exec, dependsOn: depends) { + onlyIf { node.pidFile.exists() } + // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString + ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}" + final File jps = Jvm.forHome(project.runtimeJavaHome).getExecutable('jps') + commandLine jps, '-l' + standardOutput = new ByteArrayOutputStream() + doLast { + String out = standardOutput.toString() + if (out.contains("${ext.pid} org.elasticsearch.bootstrap.Elasticsearch") == false) { + logger.error('jps -l') + logger.error(out) + logger.error("pid file: ${node.pidFile}") + logger.error("pid: ${ext.pid}") + throw new GradleException("jps -l did not report any process with org.elasticsearch.bootstrap.Elasticsearch\n" + + "Did you run gradle clean? 
Maybe an old pid file is still lying around.") + } else { + logger.info(out) + } + } + } + } + + /** Adds a task to kill an elasticsearch node with the given pidfile */ + static Task configureStopTask(String name, Project project, Object depends, NodeInfo node) { + return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) { + onlyIf { node.pidFile.exists() } + // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString + ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}" + doFirst { + logger.info("Shutting down external node with pid ${pid}") + } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'Taskkill' + args '/PID', pid, '/F' + } else { + executable 'kill' + args '-9', pid + } + doLast { + project.delete(node.pidFile) + // Large tests can exhaust disk space, clean up jdk from the distribution to save some space + project.delete(new File(node.homeDir, "jdk")) + } + } + } + + /** Returns a unique task name for this task and node configuration */ + static String taskName(String prefix, NodeInfo node, String action) { + if (node.config.numNodes > 1) { + return "${prefix}#node${node.nodeNum}.${action}" + } else { + return "${prefix}#${action}" + } + } + + public static String pluginTaskName(String action, String name, String suffix) { + // replace every dash followed by a character with just the uppercase character + String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) } + return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix + } + + /** Runs an ant command, sending output to the given out and error streams */ + static Object runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) { + DefaultLogger listener = new DefaultLogger( + errorPrintStream: errorStream, + outputPrintStream: outputStream, + messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO) + + AntBuilder ant = project.createAntBuilder() + ant.project.addBuildListener(listener) + Object retVal = command(ant) + ant.project.removeBuildListener(listener) + return retVal + } + + static void verifyProjectHasBuildPlugin(String name, Version version, Project project, Project pluginProject) { + if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { + throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + + "[${project.path}] dependencies: the plugin is not an esplugin") + } + } + + /** Find the plugin name in the given project. */ + static String findPluginName(Project pluginProject) { + PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') + return extension.name + } + + /** Find the current OS */ + static String getOs() { + String os = "linux" + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + os = "windows" + } else if (Os.isFamily(Os.FAMILY_MAC)) { + os = "darwin" + } + return os + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy new file mode 100644 index 00000000000..cb7a8397ed0 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.test + +import com.sun.jna.Native +import com.sun.jna.WString +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties +import org.gradle.api.Project + +import java.nio.file.Files +import java.nio.file.Path +import java.nio.file.Paths +/** + * A container for the files and configuration associated with a single node in a test cluster. + */ +class NodeInfo { + /** Gradle project this node is part of */ + Project project + + /** common configuration for all nodes, including this one */ + ClusterConfiguration config + + /** node number within the cluster, for creating unique names and paths */ + int nodeNum + + /** name of the cluster this node is part of */ + String clusterName + + /** root directory all node files and operations happen under */ + File baseDir + + /** shared data directory all nodes share */ + File sharedDir + + /** the pid file the node will use */ + File pidFile + + /** a file written by elasticsearch containing the ports of each bound address for http */ + File httpPortsFile + + /** a file written by elasticsearch containing the ports of each bound address for transport */ + File transportPortsFile + + /** elasticsearch home dir */ + File homeDir + + /** config directory */ + File pathConf + + /** data directory (as an Object, to allow lazy evaluation) */ + Object dataDir + + /** THE config file */ + File configFile + + /** working directory for the node process */ + File cwd + + /** file that if it exists, indicates the node failed to start */ + File failedMarker + + /** stdout/stderr log of the elasticsearch process for this node */ + File startLog + + /** directory to install plugins from */ + File pluginsTmpDir + + /** Major version of java this node runs with, or {@code null} if using the runtime java version */ + Integer javaVersion + + /** environment variables to start the node with */ + Map env + + /** arguments to start the node with */ + List args + + /** Executable to run the bin/elasticsearch with, either cmd or sh */ + String executable + + /** Path to the elasticsearch start script */ + private Object esScript + + /** script to run when running in the background */ + private File wrapperScript + + /** buffer for ant output when starting this node */ + ByteArrayOutputStream buffer = new ByteArrayOutputStream() + + /** the version of elasticsearch that this node runs */ + Version nodeVersion + + /** true if the node is not the current version */ + boolean isBwcNode + + /** Holds node configuration for part of a test cluster. 
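+     * The constructor derives the node's directory layout (home, config, data, and the
+     * ports files) along with the executable, arguments, and environment used to launch it.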
+     */
+    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
+        this.config = config
+        this.nodeNum = nodeNum
+        this.project = project
+        this.sharedDir = sharedDir
+        if (config.clusterName != null) {
+            clusterName = config.clusterName
+        } else {
+            clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
+        }
+        baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
+        pidFile = new File(baseDir, 'es.pid')
+        this.nodeVersion = Version.fromString(nodeVersion)
+        this.isBwcNode = this.nodeVersion.before(VersionProperties.elasticsearch)
+        homeDir = new File(baseDir, "elasticsearch-${nodeVersion}")
+        pathConf = new File(homeDir, 'config')
+        if (config.dataDir != null) {
+            dataDir = "${config.dataDir(nodeNum)}"
+        } else {
+            dataDir = new File(homeDir, "data")
+        }
+        configFile = new File(pathConf, 'elasticsearch.yml')
+        // even for rpm/deb, the logs are under home because we don't start with real services
+        File logsDir = new File(homeDir, 'logs')
+        httpPortsFile = new File(logsDir, 'http.ports')
+        transportPortsFile = new File(logsDir, 'transport.ports')
+        cwd = new File(baseDir, "cwd")
+        failedMarker = new File(cwd, 'run.failed')
+        startLog = new File(cwd, 'run.log')
+        pluginsTmpDir = new File(baseDir, "plugins tmp")
+
+        args = []
+        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+            executable = 'cmd'
+            args.add('/C')
+            args.add('"') // quote the entire command
+            wrapperScript = new File(cwd, "run.bat")
+            /*
+             * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
+             * getting the short name requiring the path to already exist.
+             */
+            esScript = "${-> binPath().resolve('elasticsearch.bat').toString()}"
+        } else {
+            executable = 'bash'
+            wrapperScript = new File(cwd, "run")
+            esScript = binPath().resolve('elasticsearch')
+        }
+        if (config.daemonize) {
+            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+                /*
+                 * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
+                 * getting the short name requiring the path to already exist.
+                 */
+                args.add("${-> getShortPathName(wrapperScript.toString())}")
+            } else {
+                args.add("${wrapperScript}")
+            }
+        } else {
+            args.add("${esScript}")
+        }
+
+        if (this.nodeVersion.before("6.2.0")) {
+            javaVersion = 8
+        } else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) {
+            javaVersion = 9
+        } else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) {
+            javaVersion = 10
+        }
+
+        args.addAll("-E", "node.portsfile=true")
+        env = [:]
+        env.putAll(config.environmentVariables)
+        for (Map.Entry<String, String> property : System.properties.entrySet()) {
+            if (property.key.startsWith('tests.es.')) {
+                args.add("-E")
+                args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
+            }
+        }
+        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+            /*
+             * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
+             * getting the short name requiring the path to already exist.
+             */
+            env.put('ES_PATH_CONF', "${-> getShortPathName(pathConf.toString())}")
+        } else {
+            env.put('ES_PATH_CONF', pathConf)
+        }
+        if (!System.properties.containsKey("tests.es.path.data")) {
+            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+                /*
+                 * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
+                 * getting the short name requiring the path to already exist.
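+                 * (The "${-> ...}" GString form wraps the expression in a closure, so it is evaluated only when the string is rendered at execution time.)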
+                 * This one is extra tricky because usually we rely on the node
+                 * creating its data directory on startup but we simply can not do that here because getting the short path name requires
+                 * the directory to already exist. Therefore, we create this directory immediately before getting the short name.
+                 */
+                args.addAll("-E", "path.data=${-> Files.createDirectories(Paths.get(dataDir.toString())); getShortPathName(dataDir.toString())}")
+            } else {
+                args.addAll("-E", "path.data=${-> dataDir.toString()}")
+            }
+        }
+        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+            args.add('"') // end the entire command, quoted
+        }
+    }
+
+    Path binPath() {
+        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+            return Paths.get(getShortPathName(new File(homeDir, 'bin').toString()))
+        } else {
+            return Paths.get(new File(homeDir, 'bin').toURI())
+        }
+    }
+
+    static String getShortPathName(String path) {
+        assert Os.isFamily(Os.FAMILY_WINDOWS)
+        final WString longPath = new WString("\\\\?\\" + path)
+        // first we get the length of the buffer needed
+        final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0)
+        if (length == 0) {
+            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
+        }
+        final char[] shortPath = new char[length]
+        // knowing the length of the buffer, now we get the short name
+        if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) == 0) {
+            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
+        }
+        // we have to strip the \\?\ away from the path for cmd.exe
+        return Native.toString(shortPath).substring(4)
+    }
+
+    /** Returns debug string for the command that started this node. */
+    String getCommandString() {
+        String esCommandString = "\nNode ${nodeNum} configuration:\n"
+        esCommandString += "|-----------------------------------------\n"
+        esCommandString += "|  cwd: ${cwd}\n"
+        esCommandString += "|  command: ${executable} ${args.join(' ')}\n"
+        esCommandString += '|  environment:\n'
+        env.each { k, v -> esCommandString += "|    ${k}: ${v}\n" }
+        if (config.daemonize) {
+            esCommandString += "|\n|  [${wrapperScript.name}]\n"
+            wrapperScript.eachLine('UTF-8', { line -> esCommandString += "|    ${line}\n"})
+        }
+        esCommandString += '|\n|  [elasticsearch.yml]\n'
+        configFile.eachLine('UTF-8', { line -> esCommandString += "|    ${line}\n" })
+        esCommandString += "|-----------------------------------------"
+        return esCommandString
+    }
+
+    void writeWrapperScript() {
+        String argsPasser = '"$@"'
+        String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
+        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+            argsPasser = '%*'
+            exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
+        }
+        wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
+    }
+
+    /** Returns an address and port suitable for a uri to connect to this node over http */
+    String httpUri() {
+        return httpPortsFile.readLines("UTF-8").get(0)
+    }
+
+    /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
+    String transportUri() {
+        return transportPortsFile.readLines("UTF-8").get(0)
+    }
+
+    /** Returns the file which contains the transport protocol ports for this node */
+    File getTransportPortsFile() {
+        return transportPortsFile
+    }
+
+    /** Returns the data directory for this node */
+    File getDataDir() {
+        if (!(dataDir instanceof File)) {
+            return new File(dataDir)
+        }
+        return dataDir
+    }
+}
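
For context, here is a minimal sketch of how a build script could drive the
restored classes directly. The `ClusterConfiguration(project)` constructor and
the task wiring are assumptions for illustration (the `integTest` prefix and
the two-node sizing are made up), not part of this patch:

    import org.elasticsearch.gradle.VersionProperties
    import org.elasticsearch.gradle.test.ClusterConfiguration
    import org.elasticsearch.gradle.test.ClusterFormationTasks
    import org.elasticsearch.gradle.test.NodeInfo

    ClusterConfiguration config = new ClusterConfiguration(project) // assumed constructor
    config.numNodes = 2     // two-node test cluster
    config.daemonize = true // run the nodes in the background

    File sharedDir = new File(project.buildDir, 'cluster/shared')
    (0..<config.numNodes).each { int i ->
        // Describe one node; VersionProperties.elasticsearch is the current version string
        NodeInfo node = new NodeInfo(config, i, project, 'integTest', VersionProperties.elasticsearch, sharedDir)
        // One kill task per node, named "integTest#node0.stop", "integTest#node1.stop", ...
        ClusterFormationTasks.configureStopTask(
                ClusterFormationTasks.taskName('integTest', node, 'stop'), project, [], node)
    }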