Add Docker packaging tests on 7.x (#48857)

Backport of #46599 and #47640. Add packaging tests for Docker.

* Introduce packaging tests for Docker (#46599)

Closes #37617. Add packaging tests for our Docker images, similar to what
we have for RPMs or Debian packages. This works by running a container and
probing it e.g. via `docker exec`. Tests can also be run in Vagrant, by
exporting the Docker images to disk and loading them again in VMs. Docker
is installed via `Vagrantfile` in a selection of boxes.
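
(Illustration only, not part of this commit: at the Docker CLI level, "probing" a container boils down to `docker exec`. A minimal, self-contained Java sketch of that pattern; the class and method names here are invented for the example.)

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Run a command inside a running container via `docker exec` and capture
    // its combined output; the real tests wrap this in helper classes.
    class DockerExecProbe {
        static String exec(String containerId, List<String> command) throws IOException, InterruptedException {
            List<String> argv = new ArrayList<>(List.of("docker", "exec", containerId));
            argv.addAll(command);
            Process process = new ProcessBuilder(argv).redirectErrorStream(true).start();
            String output = new String(process.getInputStream().readAllBytes());
            if (process.waitFor() != 0) {
                throw new IOException("docker exec failed: " + output);
            }
            return output;
        }
    }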

* Only define Docker pkg tests if Docker is available (#47640)

Closes #47639, and unmutes tests that were muted in b958467.

The Docker packaging tests were being defined irrespective of whether
Docker was actually available in the current environment. Instead,
implement exclude lists so that in environments where Docker is not
available, no Docker packaging tests are defined. For CI hosts, the build
checks `.ci/dockerOnLinuxExclusions`. The Vagrant VMs can define the
extension property `shouldTestDocker` to opt in to the Docker packaging
tests.

As part of this, define a separate utility class for checking Docker,
and call that instead of defining checks inline in BuildPlugin.groovy.
Commit 24f7d4e83b (parent baabc21a04), authored by Rory Hunter on 2019-11-05 15:17:59 +00:00, committed by GitHub.
30 changed files with 1347 additions and 124 deletions.

.ci/dockerOnLinuxExclusions (new file)
@ -0,0 +1,12 @@
# This file specifies the Linux OS versions on which we can't build and
# test Docker images for some reason. These values correspond to ID and
# VERSION_ID from /etc/os-release, and a matching value will cause the
# Docker tests to be skipped on that OS. If /etc/os-release doesn't exist
# (as is the case on centos-6, for example) then that OS will also be
# excluded.
centos-6
debian-8
opensuse-15-1
ol-6.10
ol-7.7
sles-12


@ -6,6 +6,16 @@ if which zypper > /dev/null ; then
  sudo zypper install -y insserv-compat
fi

if [ -e /etc/sysctl.d/99-gce.conf ]; then
  # The GCE defaults disable IPv4 forwarding, which breaks the Docker
  # build. Work around this by renaming the file so that it is executed
  # earlier than our own overrides.
  #
  # This ultimately needs to be fixed at the image level - see infra
  # issue 15654.
  sudo mv /etc/sysctl.d/99-gce.conf /etc/sysctl.d/98-gce.conf
fi

# Required by bats
sudo touch /etc/is_vagrant_vm
sudo useradd vagrant

Vagrantfile (vendored, 109 changed lines)

@ -1,5 +1,5 @@
# -*- mode: ruby -*-
-# vi: set ft=ruby :
+# vim: ft=ruby ts=2 sw=2 sts=2 et:

# This Vagrantfile exists to test packaging. Read more about its use in the
# vagrant section in TESTING.asciidoc.

@ -63,6 +63,7 @@ Vagrant.configure(2) do |config|
        # Install Jayatana so we can work around it being present.
        [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
      SHELL
+     ubuntu_docker config
    end
  end
  'ubuntu-1804'.tap do |box|
@ -72,6 +73,7 @@ Vagrant.configure(2) do |config|
        # Install Jayatana so we can work around it being present.
        [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
      SHELL
+     ubuntu_docker config
    end
  end
  # Wheezy's backports don't contain Openjdk 8 and the backflips
@ -90,6 +92,7 @@ Vagrant.configure(2) do |config|
    config.vm.define box, define_opts do |config|
      config.vm.box = 'elastic/debian-9-x86_64'
      deb_common config, box
+     deb_docker config
    end
  end
  'centos-6'.tap do |box|
@ -102,6 +105,7 @@ Vagrant.configure(2) do |config|
    config.vm.define box, define_opts do |config|
      config.vm.box = 'elastic/centos-7-x86_64'
      rpm_common config, box
+     rpm_docker config
    end
  end
  'oel-6'.tap do |box|
@ -120,12 +124,14 @@ Vagrant.configure(2) do |config|
    config.vm.define box, define_opts do |config|
      config.vm.box = 'elastic/fedora-28-x86_64'
      dnf_common config, box
+     dnf_docker config
    end
  end
  'fedora-29'.tap do |box|
    config.vm.define box, define_opts do |config|
-     config.vm.box = 'elastic/fedora-28-x86_64'
+     config.vm.box = 'elastic/fedora-29-x86_64'
      dnf_common config, box
+     dnf_docker config
    end
  end
  'opensuse-42'.tap do |box|
@ -188,6 +194,67 @@ def deb_common(config, name, extra: '')
  )
end
def ubuntu_docker(config)
config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
# Install packages to allow apt to use a repository over HTTPS
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
# Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
# Set up the stable Docker repository
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
# Enable IPv4 forwarding
sed -i '/net.ipv4.ip_forward/s/^#//' /etc/sysctl.conf
systemctl restart networking
SHELL
end
def deb_docker(config)
config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
# Install packages to allow apt to use a repository over HTTPS
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
# Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
# Set up the stable Docker repository
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -cs) \
stable"
# Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def rpm_common(config, name)
  linux_common(
    config,
@ -198,6 +265,25 @@ def rpm_common(config, name)
  )
end
def rpm_docker(config)
config.vm.provision 'install Docker using yum', type: 'shell', inline: <<-SHELL
# Install prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add repository
yum-config-manager -y --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce docker-ce-cli containerd.io
# Start Docker
systemctl enable --now docker
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def dnf_common(config, name)
  # Autodetect doesn't work....
  if Vagrant.has_plugin?('vagrant-cachier')
@ -214,6 +300,25 @@ def dnf_common(config, name)
  )
end
def dnf_docker(config)
config.vm.provision 'install Docker using dnf', type: 'shell', inline: <<-SHELL
# Install prerequisites
dnf -y install dnf-plugins-core
# Add repository
dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
# Install Docker
dnf install -y docker-ce docker-ce-cli containerd.io
# Start Docker
systemctl enable --now docker
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def suse_common(config, name, extra: '')
  linux_common(
    config,

BuildPlugin.groovy

@ -77,15 +77,14 @@ import org.gradle.external.javadoc.CoreJavadocOptions
import org.gradle.internal.jvm.Jvm
import org.gradle.language.base.plugins.LifecycleBasePlugin
import org.gradle.process.CommandLineArgumentProvider
-import org.gradle.process.ExecResult
-import org.gradle.process.ExecSpec
import org.gradle.util.GradleVersion

import java.nio.charset.StandardCharsets
import java.nio.file.Files
-import java.util.regex.Matcher

import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure
+import static org.elasticsearch.gradle.tool.DockerUtils.assertDockerIsAvailable
+import static org.elasticsearch.gradle.tool.DockerUtils.getDockerPath

/**
 * Encapsulates build configuration for elasticsearch projects.

@ -184,8 +183,7 @@ class BuildPlugin implements Plugin<Project> {
         */
        // check if the Docker binary exists and record its path
-       final List<String> maybeDockerBinaries = ['/usr/bin/docker', '/usr/local/bin/docker']
-       final String dockerBinary = maybeDockerBinaries.find { it -> new File(it).exists() }
+       final String dockerBinary = getDockerPath().orElse(null)

        final boolean buildDocker
        final String buildDockerProperty = System.getProperty("build.docker")

@ -204,55 +202,9 @@ class BuildPlugin implements Plugin<Project> {
        ext.set('requiresDocker', [])
        rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph ->
            final List<String> tasks = taskGraph.allTasks.intersect(ext.get('requiresDocker') as List<Task>).collect { "  ${it.path}".toString()}
            if (tasks.isEmpty() == false) {
-               /*
-                * There are tasks in the task graph that require Docker. Now we are failing because either the Docker binary does not
-                * exist or because execution of a privileged Docker command failed.
-                */
-               if (dockerBinary == null) {
-                   final String message = String.format(
-                           Locale.ROOT,
-                           "Docker (checked [%s]) is required to run the following task%s: \n%s",
-                           maybeDockerBinaries.join(","),
-                           tasks.size() > 1 ? "s" : "",
-                           tasks.join('\n'))
-                   throwDockerRequiredException(message)
-               }
-
-               // we use a multi-stage Docker build, check the Docker version since 17.05
-               final ByteArrayOutputStream dockerVersionOutput = new ByteArrayOutputStream()
-               LoggedExec.exec(
-                       rootProject,
-                       { ExecSpec it ->
-                           it.commandLine = [dockerBinary, '--version']
-                           it.standardOutput = dockerVersionOutput
-                       })
-               final String dockerVersion = dockerVersionOutput.toString().trim()
-               checkDockerVersionRecent(dockerVersion)
-
-               final ByteArrayOutputStream dockerImagesErrorOutput = new ByteArrayOutputStream()
-               // the Docker binary executes, check that we can execute a privileged command
-               final ExecResult dockerImagesResult = LoggedExec.exec(
-                       rootProject,
-                       { ExecSpec it ->
-                           it.commandLine = [dockerBinary, "images"]
-                           it.errorOutput = dockerImagesErrorOutput
-                           it.ignoreExitValue = true
-                       })
-
-               if (dockerImagesResult.exitValue != 0) {
-                   final String message = String.format(
-                           Locale.ROOT,
-                           "a problem occurred running Docker from [%s] yet it is required to run the following task%s: \n%s\n" +
-                                   "the problem is that Docker exited with exit code [%d] with standard error output [%s]",
-                           dockerBinary,
-                           tasks.size() > 1 ? "s" : "",
-                           tasks.join('\n'),
-                           dockerImagesResult.exitValue,
-                           dockerImagesErrorOutput.toString().trim())
-                   throwDockerRequiredException(message)
-               }
+               assertDockerIsAvailable(task.project, tasks)
            }
        }
    }

@ -260,28 +212,6 @@ class BuildPlugin implements Plugin<Project> {
        (ext.get('requiresDocker') as List<Task>).add(task)
    }

-   protected static void checkDockerVersionRecent(String dockerVersion) {
-       final Matcher matcher = dockerVersion =~ /Docker version (\d+\.\d+)\.\d+(?:-[a-zA-Z0-9]+)?, build [0-9a-f]{7,40}/
-       assert matcher.matches(): dockerVersion
-       final dockerMajorMinorVersion = matcher.group(1)
-       final String[] majorMinor = dockerMajorMinorVersion.split("\\.")
-       if (Integer.parseInt(majorMinor[0]) < 17
-               || (Integer.parseInt(majorMinor[0]) == 17 && Integer.parseInt(majorMinor[1]) < 5)) {
-           final String message = String.format(
-                   Locale.ROOT,
-                   "building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]",
-                   dockerVersion)
-           throwDockerRequiredException(message)
-       }
-   }
-
-   private static void throwDockerRequiredException(final String message) {
-       throw new GradleException(
-               message + "\nyou can address this by attending to the reported issue, "
-                   + "removing the offending tasks from being executed, "
-                   + "or by passing -Dbuild.docker=false")
-   }

    /** Add a check before gradle execution phase which ensures java home for the given java version is set. */
    static void requireJavaHome(Task task, int version) {
        // use root project for global accounting

DistroTestPlugin.java

@ -28,18 +28,22 @@ import org.elasticsearch.gradle.ElasticsearchDistribution.Platform;
import org.elasticsearch.gradle.ElasticsearchDistribution.Type;
import org.elasticsearch.gradle.Jdk;
import org.elasticsearch.gradle.JdkDownloadPlugin;
+import org.elasticsearch.gradle.OS;
import org.elasticsearch.gradle.Version;
import org.elasticsearch.gradle.VersionProperties;
import org.elasticsearch.gradle.info.BuildParams;
import org.elasticsearch.gradle.vagrant.BatsProgressLogger;
import org.elasticsearch.gradle.vagrant.VagrantBasePlugin;
import org.elasticsearch.gradle.vagrant.VagrantExtension;
+import org.gradle.api.GradleException;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.Directory;
+import org.gradle.api.logging.Logger;
+import org.gradle.api.logging.Logging;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.provider.Provider;

@ -52,6 +56,7 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;

@ -66,6 +71,7 @@ import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertLinuxPath;
import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertWindowsPath;

public class DistroTestPlugin implements Plugin<Project> {
+   private static final Logger logger = Logging.getLogger(DistroTestPlugin.class);

    private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691";
    private static final String GRADLE_JDK_VENDOR = "openjdk";

@ -82,6 +88,8 @@ public class DistroTestPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
+       final boolean runDockerTests = shouldRunDockerTests(project);
+
        project.getPluginManager().apply(DistributionDownloadPlugin.class);
        project.getPluginManager().apply(BuildPlugin.class);

@ -93,16 +101,18 @@ public class DistroTestPlugin implements Plugin<Project> {
        Provider<Directory> upgradeDir = project.getLayout().getBuildDirectory().dir("packaging/upgrade");
        Provider<Directory> pluginsDir = project.getLayout().getBuildDirectory().dir("packaging/plugins");

-       List<ElasticsearchDistribution> distributions = configureDistributions(project, upgradeVersion);
+       List<ElasticsearchDistribution> distributions = configureDistributions(project, upgradeVersion, runDockerTests);
        TaskProvider<Copy> copyDistributionsTask = configureCopyDistributionsTask(project, distributionsDir);
        TaskProvider<Copy> copyUpgradeTask = configureCopyUpgradeTask(project, upgradeVersion, upgradeDir);
        TaskProvider<Copy> copyPluginsTask = configureCopyPluginsTask(project, pluginsDir);

        TaskProvider<Task> destructiveDistroTest = project.getTasks().register("destructiveDistroTest");
        for (ElasticsearchDistribution distribution : distributions) {
+           if (distribution.getType() != Type.DOCKER || runDockerTests == true) {
                TaskProvider<?> destructiveTask = configureDistroTest(project, distribution);
                destructiveDistroTest.configure(t -> t.dependsOn(destructiveTask));
+           }
        }

        Map<String, TaskProvider<?>> batsTests = new HashMap<>();
        batsTests.put("bats oss", configureBatsTest(project, "oss", distributionsDir, copyDistributionsTask));
        batsTests.put("bats default", configureBatsTest(project, "default", distributionsDir, copyDistributionsTask));

@ -127,7 +137,23 @@ public class DistroTestPlugin implements Plugin<Project> {
            TaskProvider<GradleDistroTestTask> vmTask =
                configureVMWrapperTask(vmProject, distribution.getName() + " distribution", destructiveTaskName, vmDependencies);
            vmTask.configure(t -> t.dependsOn(distribution));

-           distroTest.configure(t -> t.dependsOn(vmTask));
+           distroTest.configure(t -> {
+               // Only VM sub-projects that are specifically opted-in to testing Docker should
+               // have the Docker task added as a dependency. Although we control whether Docker
+               // is installed in the VM via `Vagrantfile` and we could auto-detect its presence
+               // in the VM, the test tasks e.g. `destructiveDistroTest.default-docker` are defined
+               // on the host during Gradle's configuration phase and not in the VM, so
+               // auto-detection doesn't work.
+               //
+               // The shouldTestDocker property could be null, hence we use Boolean.TRUE.equals()
+               boolean shouldExecute = distribution.getType() != Type.DOCKER
+                   || Boolean.TRUE.equals(vmProject.findProperty("shouldTestDocker")) == true;
+
+               if (shouldExecute) {
+                   t.dependsOn(vmTask);
+               }
+           });
        }
    }

@ -322,17 +348,22 @@ public class DistroTestPlugin implements Plugin<Project> {
        });
    }

-   private List<ElasticsearchDistribution> configureDistributions(Project project, Version upgradeVersion) {
+   private List<ElasticsearchDistribution> configureDistributions(Project project, Version upgradeVersion, boolean runDockerTests) {
        NamedDomainObjectContainer<ElasticsearchDistribution> distributions = DistributionDownloadPlugin.getContainer(project);
        List<ElasticsearchDistribution> currentDistros = new ArrayList<>();
        List<ElasticsearchDistribution> upgradeDistros = new ArrayList<>();

-       for (Type type : Arrays.asList(Type.DEB, Type.RPM)) {
+       for (Type type : Arrays.asList(Type.DEB, Type.RPM, Type.DOCKER)) {
            for (Flavor flavor : Flavor.values()) {
                for (boolean bundledJdk : Arrays.asList(true, false)) {
+                   // All our Docker images include a bundled JDK so it doesn't make sense to test without one
+                   boolean skip = type == Type.DOCKER && (runDockerTests == false || bundledJdk == false);
+
+                   if (skip == false) {
                        addDistro(distributions, type, null, flavor, bundledJdk, VersionProperties.getElasticsearch(), currentDistros);
+                   }
                }
            }
+       }

        // upgrade version is always bundled jdk
        // NOTE: this is mimicking the old VagrantTestPlugin upgrade behavior. It will eventually be replaced
        // with a dedicated upgrade test from every bwc version like other bwc tests

@ -341,6 +372,7 @@ public class DistroTestPlugin implements Plugin<Project> {
                addDistro(distributions, type, null, Flavor.OSS, true, upgradeVersion.toString(), upgradeDistros);
            }
        }

        for (Platform platform : Arrays.asList(Platform.LINUX, Platform.WINDOWS)) {
            for (Flavor flavor : Flavor.values()) {
                for (boolean bundledJdk : Arrays.asList(true, false)) {

@ -394,6 +426,99 @@ public class DistroTestPlugin implements Plugin<Project> {
    }

    private static String destructiveDistroTestTaskName(ElasticsearchDistribution distro) {
-       return "destructiveDistroTest." + distroId(distro.getType(), distro.getPlatform(), distro.getFlavor(), distro.getBundledJdk());
+       Type type = distro.getType();
+       return "destructiveDistroTest." + distroId(
+           type,
+           distro.getPlatform(),
+           distro.getFlavor(),
+           distro.getBundledJdk());
}
static Map<String, String> parseOsRelease(final List<String> osReleaseLines) {
final Map<String, String> values = new HashMap<>();
osReleaseLines.stream().map(String::trim).filter(line -> (line.isEmpty() || line.startsWith("#")) == false).forEach(line -> {
final String[] parts = line.split("=", 2);
final String key = parts[0];
// remove optional leading and trailing quotes and whitespace
final String value = parts[1].replaceAll("^['\"]?\\s*", "").replaceAll("\\s*['\"]?$", "");
values.put(key, value);
});
return values;
}
static String deriveId(final Map<String, String> osRelease) {
return osRelease.get("ID") + "-" + osRelease.get("VERSION_ID");
}
private static List<String> getLinuxExclusionList(Project project) {
final String exclusionsFilename = "dockerOnLinuxExclusions";
final Path exclusionsPath = project.getRootDir().toPath().resolve(Path.of(".ci", exclusionsFilename));
try {
return Files.readAllLines(exclusionsPath)
.stream()
.map(String::trim)
.filter(line -> (line.isEmpty() || line.startsWith("#")) == false)
.collect(Collectors.toList());
} catch (IOException e) {
throw new GradleException("Failed to read .ci/" + exclusionsFilename, e);
}
}
/**
* The {@link DistroTestPlugin} generates a number of test tasks, some
* of which are Docker packaging tests. When running on the host OS or in CI,
* i.e. not in a Vagrant VM, only certain operating systems are supported. This
* method determines whether the Docker tests should be run on the host
* OS. Essentially, unless an OS and version is specifically excluded, we expect
* to be able to run Docker and test the Docker images.
* @param project
*/
private static boolean shouldRunDockerTests(Project project) {
switch (OS.current()) {
case WINDOWS:
// Not yet supported.
return false;
case MAC:
// Assume that Docker for Mac is installed, since Docker is part of the dev workflow.
return true;
case LINUX:
// Only some hosts in CI are configured with Docker. We attempt to work out the OS
// and version, so that we know whether to expect to find Docker. We don't attempt
// to probe for whether Docker is available, because that doesn't tell us whether
// Docker is unavailable when it should be.
final Path osRelease = Paths.get("/etc/os-release");
if (Files.exists(osRelease)) {
Map<String, String> values;
try {
final List<String> osReleaseLines = Files.readAllLines(osRelease);
values = parseOsRelease(osReleaseLines);
} catch (IOException e) {
throw new GradleException("Failed to read /etc/os-release", e);
}
final String id = deriveId(values);
final boolean shouldExclude = getLinuxExclusionList(project).contains(id);
logger.warn("Linux OS id [" + id + "] is " + (shouldExclude ? "" : "not ") + "present in the Docker exclude list");
return shouldExclude == false;
}
logger.warn("/etc/os-release does not exist!");
return false;
default:
logger.warn("Unknown OS [" + OS.current() + "], answering false to shouldRunDockerTests()");
return false;
}
    }
}
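
(Illustration only, not part of this commit: the helpers above compose as follows. The sample values are the Oracle Linux 6.10 entry from the tests further down and the entries from .ci/dockerOnLinuxExclusions.)

    import java.util.List;
    import java.util.Map;

    // How deriveId() plus the exclusions file decide whether the Docker
    // packaging tests run on a given Linux host.
    class ExclusionCheckSketch {
        public static void main(String[] args) {
            // values as parseOsRelease() would extract them from /etc/os-release
            Map<String, String> osRelease = Map.of("ID", "ol", "VERSION_ID", "6.10");
            String id = osRelease.get("ID") + "-" + osRelease.get("VERSION_ID"); // deriveId() -> "ol-6.10"
            List<String> exclusions = List.of("centos-6", "debian-8", "opensuse-15-1", "ol-6.10", "ol-7.7", "sles-12");
            System.out.println(exclusions.contains(id) == false); // false: ol-6.10 is excluded
        }
    }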

DistributionDownloadPlugin.java

@ -97,8 +97,8 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
        // for the distribution as a file, just depend on the artifact directly
        dependencies.add(distribution.configuration.getName(), dependencyNotation(project, distribution));

-       // no extraction allowed for rpm or deb
-       if (distribution.getType() != Type.RPM && distribution.getType() != Type.DEB) {
+       // no extraction allowed for rpm, deb or docker
+       if (distribution.getType().shouldExtract()) {
            // for the distribution extracted, add a root level task that does the extraction, and depend on that
            // extracted configuration as an artifact consisting of the extracted distribution directory
            dependencies.add(distribution.getExtracted().configuration.getName(),
@ -245,7 +245,6 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
    }

    private static Dependency projectDependency(Project project, String projectPath, String projectConfig) {
        if (project.findProject(projectPath) == null) {
            throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects());
        }
@ -257,11 +256,20 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
    private static String distributionProjectPath(ElasticsearchDistribution distribution) {
        String projectPath = ":distribution";
-       if (distribution.getType() == Type.INTEG_TEST_ZIP) {
+       switch (distribution.getType()) {
+           case INTEG_TEST_ZIP:
                projectPath += ":archives:integ-test-zip";
-       } else {
+               break;
+
+           case DOCKER:
+               projectPath += ":docker:";
+               projectPath += distributionProjectName(distribution);
+               break;
+
+           default:
                projectPath += distribution.getType() == Type.ARCHIVE ? ":archives:" : ":packages:";
                projectPath += distributionProjectName(distribution);
+               break;
        }
        return projectPath;
    }
@ -274,6 +282,7 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
        if (distribution.getBundledJdk() == false) {
            projectName += "no-jdk-";
        }
        if (distribution.getType() == Type.ARCHIVE) {
            if (Version.fromString(distribution.getVersion()).onOrAfter("7.0.0")) {
                Platform platform = distribution.getPlatform();
@ -281,6 +290,8 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
            } else {
                projectName = distribution.getFlavor().equals(Flavor.DEFAULT) ? "zip" : "oss-zip";
            }
+       } else if (distribution.getType() == Type.DOCKER) {
+           projectName += "docker-export";
        } else {
            projectName += distribution.getType();
        }

ElasticsearchDistribution.java

@ -46,12 +46,25 @@ public class ElasticsearchDistribution implements Buildable, Iterable<File> {
        INTEG_TEST_ZIP,
        ARCHIVE,
        RPM,
-       DEB;
+       DEB,
+       DOCKER;

        @Override
        public String toString() {
            return super.toString().toLowerCase(Locale.ROOT);
        }

+       public boolean shouldExtract() {
+           switch (this) {
+               case DEB:
+               case DOCKER:
+               case RPM:
+                   return false;
+
+               default:
+                   return true;
+           }
+       }
    }

    public enum Flavor {

@ -171,12 +184,17 @@
    }

    public Extracted getExtracted() {
-       if (getType() == Type.RPM || getType() == Type.DEB) {
+       switch (getType()) {
+           case DEB:
+           case DOCKER:
+           case RPM:
                throw new UnsupportedOperationException("distribution type [" + getType() + "] for " +
                    "elasticsearch distribution [" + name + "] cannot be extracted");
-       }

+           default:
                return extracted;
+       }
    }

    @Override
    public TaskDependency getBuildDependencies() {

@ -217,7 +235,7 @@
            if (platform.isPresent() == false) {
                platform.set(CURRENT_PLATFORM);
            }
-       } else { // rpm or deb
+       } else { // rpm, deb or docker
            if (platform.isPresent()) {
                throw new IllegalArgumentException("platform not allowed for elasticsearch distribution ["
                    + name + "] of type [" + getType() + "]");

DockerUtils.java (new file)

@ -0,0 +1,239 @@
package org.elasticsearch.gradle.tool;
import org.elasticsearch.gradle.Version;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.process.ExecResult;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.util.List;
import java.util.Locale;
import java.util.Optional;
/**
* Contains utilities for checking whether Docker is installed, is executable,
* has a recent enough version, and appears to be functional. The Elasticsearch build
* requires Docker &gt;= 17.05 as it uses a multi-stage build.
*/
public class DockerUtils {
/**
* Defines the possible locations of the Docker CLI. These will be searched in order.
*/
private static String[] DOCKER_BINARIES = { "/usr/bin/docker", "/usr/local/bin/docker" };
/**
* Searches the entries in {@link #DOCKER_BINARIES} for the Docker CLI. This method does
* not check whether the Docker installation appears usable, see {@link #getDockerAvailability(Project)}
* instead.
*
* @return the path to a CLI, if available.
*/
public static Optional<String> getDockerPath() {
// Check if the Docker binary exists
return List.of(DOCKER_BINARIES)
.stream()
.filter(path -> new File(path).exists())
.findFirst();
}
/**
* Searches for a functional Docker installation, and returns information about the search.
* @return the results of the search.
*/
private static DockerAvailability getDockerAvailability(Project project) {
String dockerPath = null;
Result lastResult = null;
Version version = null;
boolean isVersionHighEnough = false;
// Check if the Docker binary exists
final Optional<String> dockerBinary = getDockerPath();
if (dockerBinary.isPresent()) {
dockerPath = dockerBinary.get();
// Since we use a multi-stage Docker build, check that the Docker version is at least 17.05
lastResult = runCommand(project, dockerPath, "version", "--format", "{{.Server.Version}}");
if (lastResult.isSuccess() == true) {
version = Version.fromString(lastResult.stdout.trim(), Version.Mode.RELAXED);
isVersionHighEnough = version.onOrAfter("17.05.0");
if (isVersionHighEnough == true) {
// Check that we can execute a privileged command
lastResult = runCommand(project, dockerPath, "images");
}
}
}
boolean isAvailable = isVersionHighEnough && lastResult.isSuccess() == true;
return new DockerAvailability(isAvailable, isVersionHighEnough, dockerPath, version, lastResult);
}
/**
* An immutable class that represents the results of a Docker search from {@link #getDockerAvailability(Project)}}.
*/
private static class DockerAvailability {
/**
* Indicates whether Docker is available and meets the required criteria.
* True if, and only if, Docker is:
* <ul>
* <li>Installed</li>
* <li>Executable</li>
* <li>Is at least version 17.05</li>
* <li>Can execute a command that requires privileges</li>
* </ul>
*/
final boolean isAvailable;
/**
* True if the installed Docker version is &gt;= 17.05
*/
final boolean isVersionHighEnough;
/**
* The path to the Docker CLI, or null
*/
public final String path;
/**
* The installed Docker version, or null
*/
public final Version version;
/**
 * Information about the last command executed while probing Docker, or null.
*/
final Result lastCommand;
DockerAvailability(boolean isAvailable, boolean isVersionHighEnough, String path, Version version, Result lastCommand) {
this.isAvailable = isAvailable;
this.isVersionHighEnough = isVersionHighEnough;
this.path = path;
this.version = version;
this.lastCommand = lastCommand;
}
}
/**
* Given a list of tasks that requires Docker, check whether Docker is available, otherwise
* throw an exception.
* @param project a Gradle project
* @param tasks the tasks that require Docker
* @throws GradleException if Docker is not available. The exception message gives the reason.
*/
public static void assertDockerIsAvailable(Project project, List<String> tasks) {
DockerAvailability availability = getDockerAvailability(project);
if (availability.isAvailable == true) {
return;
}
/*
* There are tasks in the task graph that require Docker.
* Now we are failing because either the Docker binary does
* not exist or because execution of a privileged Docker
* command failed.
*/
if (availability.path == null) {
final String message = String.format(
Locale.ROOT,
"Docker (checked [%s]) is required to run the following task%s: \n%s",
String.join(", ", DOCKER_BINARIES),
tasks.size() > 1 ? "s" : "",
String.join("\n", tasks));
throwDockerRequiredException(message);
}
if (availability.version == null) {
final String message = String.format(
Locale.ROOT,
"Docker is required to run the following task%s, but it doesn't appear to be running: \n%s",
tasks.size() > 1 ? "s" : "",
String.join("\n", tasks));
throwDockerRequiredException(message);
}
if (availability.isVersionHighEnough == false) {
final String message = String.format(
Locale.ROOT,
"building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]",
availability.version);
throwDockerRequiredException(message);
}
// Some other problem, print the error
final String message = String.format(
Locale.ROOT,
"a problem occurred running Docker from [%s] yet it is required to run the following task%s: \n%s\n" +
"the problem is that Docker exited with exit code [%d] with standard error output [%s]",
availability.path,
tasks.size() > 1 ? "s" : "",
String.join("\n", tasks),
availability.lastCommand.exitCode,
availability.lastCommand.stderr.trim());
throwDockerRequiredException(message);
}
private static void throwDockerRequiredException(final String message) {
throwDockerRequiredException(message, null);
}
private static void throwDockerRequiredException(final String message, Exception e) {
throw new GradleException(
message + "\nyou can address this by attending to the reported issue, "
+ "removing the offending tasks from being executed, "
+ "or by passing -Dbuild.docker=false", e);
}
/**
* Runs a command and captures the exit code, standard output and standard error.
* @param args the command and any arguments to execute
 * @return an object that captures the result of running the command. If an exception occurs
 * while running the command, or the process is killed after reaching the 10s timeout,
 * then the exit code will be -1.
*/
private static Result runCommand(Project project, String... args) {
if (args.length == 0) {
throw new IllegalArgumentException("Cannot execute with no command");
}
ByteArrayOutputStream stdout = new ByteArrayOutputStream();
ByteArrayOutputStream stderr = new ByteArrayOutputStream();
final ExecResult execResult = project.exec(spec -> {
// The redundant cast is to silence a compiler warning.
spec.setCommandLine((Object[]) args);
spec.setStandardOutput(stdout);
spec.setErrorOutput(stderr);
});
return new Result(execResult.getExitValue(), stdout.toString(), stderr.toString());
}
/**
* This class models the result of running a command. It captures the exit code, standard output and standard error.
*/
private static class Result {
final int exitCode;
final String stdout;
final String stderr;
Result(int exitCode, String stdout, String stderr) {
this.exitCode = exitCode;
this.stdout = stdout;
this.stderr = stderr;
}
boolean isSuccess() {
return exitCode == 0;
}
public String toString() {
return "exitCode = [" + exitCode + "] " + "stdout = [" + stdout.trim() + "] " + "stderr = [" + stderr.trim() + "]";
}
}
}

View File

@ -13,9 +13,28 @@ public final class Version implements Comparable<Version> {
    private final int revision;
    private final int id;
/**
* Specifies how a version string should be parsed.
*/
public enum Mode {
/**
* Strict parsing only allows known suffixes after the patch number: "-alpha", "-beta" or "-rc". The
* suffix "-SNAPSHOT" is also allowed, either after the patch number, or after the other suffices.
*/
STRICT,
/**
* Relaxed parsing allows any alphanumeric suffix after the patch number.
*/
RELAXED
}
private static final Pattern pattern = private static final Pattern pattern =
Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?");
private static final Pattern relaxedPattern =
Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-[a-zA-Z0-9_]+)*?");
    public Version(int major, int minor, int revision) {
        Objects.requireNonNull(major, "major version can't be null");
        Objects.requireNonNull(minor, "minor version can't be null");
@ -36,11 +55,18 @@ public final class Version implements Comparable<Version> {
    }

    public static Version fromString(final String s) {
+       return fromString(s, Mode.STRICT);
+   }
+
+   public static Version fromString(final String s, final Mode mode) {
        Objects.requireNonNull(s);
-       Matcher matcher = pattern.matcher(s);
+       Matcher matcher = mode == Mode.STRICT ? pattern.matcher(s) : relaxedPattern.matcher(s);
        if (matcher.matches() == false) {
+           String expected = mode == Mode.STRICT == true
+               ? "major.minor.revision[-(alpha|beta|rc)Number][-SNAPSHOT]"
+               : "major.minor.revision[-extra]";
            throw new IllegalArgumentException(
-               "Invalid version format: '" + s + "'. Should be major.minor.revision[-(alpha|beta|rc)Number][-SNAPSHOT]"
+               "Invalid version format: '" + s + "'. Should be " + expected
            );
        }
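
(Illustration only, not part of this commit: the relaxed mode exists because Docker reports server versions such as 18.06.1-ce, which the strict pattern rejects; DockerUtils above parses them with Version.Mode.RELAXED. This sketch assumes the buildSrc Version class is on the classpath.)

    import org.elasticsearch.gradle.Version;

    class VersionModeSketch {
        public static void main(String[] args) {
            // RELAXED accepts an arbitrary alphanumeric suffix after the patch number
            Version docker = Version.fromString("18.06.1-ce", Version.Mode.RELAXED);
            System.out.println(docker.onOrAfter("17.05.0")); // true

            // STRICT rejects the same string
            try {
                Version.fromString("18.06.1-ce");
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // Invalid version format: '18.06.1-ce'. Should be ...
            }
        }
    }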

BuildPluginTests.java

@ -28,17 +28,6 @@ import java.net.URISyntaxException;
public class BuildPluginTests extends GradleUnitTestCase {

-   public void testPassingDockerVersions() {
-       BuildPlugin.checkDockerVersionRecent("Docker version 18.06.1-ce, build e68fc7a215d7");
-       BuildPlugin.checkDockerVersionRecent("Docker version 17.05.0, build e68fc7a");
-       BuildPlugin.checkDockerVersionRecent("Docker version 17.05.1, build e68fc7a");
-   }
-
-   @Test(expected = GradleException.class)
-   public void testFailingDockerVersions() {
-       BuildPlugin.checkDockerVersionRecent("Docker version 17.04.0, build e68fc7a");
-   }
-
    @Test(expected = GradleException.class)
    public void testRepositoryURIThatUsesHttpScheme() throws URISyntaxException {
        final URI uri = new URI("http://s3.amazonaws.com/artifacts.elastic.co/maven");

VersionTests.java

@ -40,6 +40,14 @@ public class VersionTests extends GradleUnitTestCase {
assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2); assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2);
} }
public void testRelaxedVersionParsing() {
assertVersionEquals("6.1.2", 6, 1, 2, Version.Mode.RELAXED);
assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2, Version.Mode.RELAXED);
assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2, Version.Mode.RELAXED);
assertVersionEquals("6.1.2-foo", 6, 1, 2, Version.Mode.RELAXED);
assertVersionEquals("6.1.2-foo-bar", 6, 1, 2, Version.Mode.RELAXED);
}
    public void testCompareWithStringVersions() {
        assertTrue("1.10.20 is not interpreted as before 2.0.0",
            Version.fromString("1.10.20").before("2.0.0")
@ -100,7 +108,11 @@ public class VersionTests extends GradleUnitTestCase {
    }

    private void assertVersionEquals(String stringVersion, int major, int minor, int revision) {
-       Version version = Version.fromString(stringVersion);
+       assertVersionEquals(stringVersion, major, minor, revision, Version.Mode.STRICT);
+   }
+
+   private void assertVersionEquals(String stringVersion, int major, int minor, int revision, Version.Mode mode) {
+       Version version = Version.fromString(stringVersion, mode);
        assertEquals(major, version.getMajor());
        assertEquals(minor, version.getMinor());
        assertEquals(revision, version.getRevision());

DistroTestPluginTests.java (new file)

@ -0,0 +1,85 @@
package org.elasticsearch.gradle.test;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.gradle.test.DistroTestPlugin.deriveId;
import static org.elasticsearch.gradle.test.DistroTestPlugin.parseOsRelease;
import static org.hamcrest.CoreMatchers.equalTo;
public class DistroTestPluginTests extends GradleIntegrationTestCase {
public void testParseOsReleaseOnOracle() {
final List<String> lines = List
.of(
"NAME=\"Oracle Linux Server\"",
"VERSION=\"6.10\"",
"ID=\"ol\"",
"VERSION_ID=\"6.10\"",
"PRETTY_NAME=\"Oracle Linux Server 6.10\"",
"ANSI_COLOR=\"0;31\"",
"CPE_NAME=\"cpe:/o:oracle:linux:6:10:server\"",
"HOME_URL" + "=\"https://linux.oracle.com/\"",
"BUG_REPORT_URL=\"https://bugzilla.oracle.com/\"",
"",
"ORACLE_BUGZILLA_PRODUCT" + "=\"Oracle Linux 6\"",
"ORACLE_BUGZILLA_PRODUCT_VERSION=6.10",
"ORACLE_SUPPORT_PRODUCT=\"Oracle Linux\"",
"ORACLE_SUPPORT_PRODUCT_VERSION=6.10"
);
final Map<String, String> results = parseOsRelease(lines);
final Map<String, String> expected = new HashMap<>();
expected.put("ANSI_COLOR", "0;31");
expected.put("BUG_REPORT_URL", "https://bugzilla.oracle.com/");
expected.put("CPE_NAME", "cpe:/o:oracle:linux:6:10:server");
expected.put("HOME_URL" + "", "https://linux.oracle.com/");
expected.put("ID", "ol");
expected.put("NAME", "Oracle Linux Server");
expected.put("ORACLE_BUGZILLA_PRODUCT" + "", "Oracle Linux 6");
expected.put("ORACLE_BUGZILLA_PRODUCT_VERSION", "6.10");
expected.put("ORACLE_SUPPORT_PRODUCT", "Oracle Linux");
expected.put("ORACLE_SUPPORT_PRODUCT_VERSION", "6.10");
expected.put("PRETTY_NAME", "Oracle Linux Server 6.10");
expected.put("VERSION", "6.10");
expected.put("VERSION_ID", "6.10");
assertThat(expected, equalTo(results));
}
/**
* Trailing whitespace should be removed
*/
public void testRemoveTrailingWhitespace() {
final List<String> lines = List.of("NAME=\"Oracle Linux Server\" ");
final Map<String, String> results = parseOsRelease(lines);
final Map<String, String> expected = Map.of("NAME", "Oracle Linux Server");
assertThat(expected, equalTo(results));
}
/**
* Comments should be removed
*/
public void testRemoveComments() {
final List<String> lines = List.of("# A comment", "NAME=\"Oracle Linux Server\"");
final Map<String, String> results = parseOsRelease(lines);
final Map<String, String> expected = Map.of("NAME", "Oracle Linux Server");
assertThat(expected, equalTo(results));
}
public void testDeriveIdOnOracle() {
final Map<String, String> osRelease = new HashMap<>();
osRelease.put("ID", "ol");
osRelease.put("VERSION_ID", "6.10");
assertThat("ol-6.10", equalTo(deriveId(osRelease)));
}
}

distribution/docker/build.gradle

@ -191,3 +191,38 @@ for (final boolean oss : [false, true]) {
if (tasks.findByName("composePull")) {
  tasks.composePull.enabled = false
}
/*
* The export subprojects write out the generated Docker images to disk, so
* that they can be easily reloaded, for example into a VM.
*/
subprojects { Project subProject ->
if (subProject.name.contains('docker-export')) {
apply plugin: 'distribution'
final boolean oss = subProject.name.contains('oss-')
final boolean ubi = subProject.name.contains('ubi-')
def exportTaskName = taskName("export", oss, ubi, "DockerImage")
def buildTaskName = taskName("build", oss, ubi, "DockerImage")
def tarFile = "${parent.projectDir}/build/elasticsearch${oss ? '-oss' : ''}${ubi ? '-ubi7' : ''}_test.${VersionProperties.elasticsearch}.docker.tar"
final Task exportDockerImageTask = task(exportTaskName, type: LoggedExec) {
executable 'docker'
args "save",
"-o",
tarFile,
"elasticsearch${oss ? '-oss' : ''}${ubi ? '-ubi7' : ''}:test"
}
exportDockerImageTask.dependsOn(parent.tasks.getByName(buildTaskName))
artifacts.add('default', file(tarFile)) {
type 'tar'
name "elasticsearch${oss ? '-oss' : ''}${ubi ? '-ubi7' : ''}"
builtBy exportTaskName
}
assemble.dependsOn exportTaskName
}
}
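
(Illustration only, not part of this commit: the counterpart of the `docker save` task above is reloading the archive into a Docker daemon, which is what `ensureImageIsLoaded` in the packaging tests ultimately arranges; at the CLI level it is just `docker load`. Sketched in Java to match the other examples; the class name is invented.)

    import java.io.IOException;

    // Reload an exported image archive into the local Docker daemon.
    class DockerLoadSketch {
        public static void main(String[] args) throws IOException, InterruptedException {
            String tarFile = args[0]; // path to the exported *.docker.tar artifact
            Process process = new ProcessBuilder("docker", "load", "-i", tarFile)
                .inheritIO()
                .start();
            if (process.waitFor() != 0) {
                throw new IOException("docker load failed for " + tarFile);
            }
        }
    }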


@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.


@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.


@ -0,0 +1 @@
project.ext.shouldTestDocker = true


@ -0,0 +1 @@
project.ext.shouldTestDocker = true


@ -0,0 +1 @@
project.ext.shouldTestDocker = true


@ -0,0 +1 @@
project.ext.shouldTestDocker = true

DockerTests.java (new file)

@ -0,0 +1,227 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.test;
import org.apache.http.client.fluent.Request;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Docker.DockerShell;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.ServerUtils;
import org.elasticsearch.packaging.util.Shell.Result;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.elasticsearch.packaging.util.Docker.assertPermissionsAndOwnership;
import static org.elasticsearch.packaging.util.Docker.copyFromContainer;
import static org.elasticsearch.packaging.util.Docker.ensureImageIsLoaded;
import static org.elasticsearch.packaging.util.Docker.existsInContainer;
import static org.elasticsearch.packaging.util.Docker.removeContainer;
import static org.elasticsearch.packaging.util.Docker.runContainer;
import static org.elasticsearch.packaging.util.Docker.verifyContainerInstallation;
import static org.elasticsearch.packaging.util.Docker.waitForPathToExist;
import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileUtils.append;
import static org.elasticsearch.packaging.util.FileUtils.getTempDir;
import static org.elasticsearch.packaging.util.FileUtils.mkdir;
import static org.elasticsearch.packaging.util.FileUtils.rm;
import static org.elasticsearch.packaging.util.ServerUtils.makeRequest;
import static org.elasticsearch.packaging.util.ServerUtils.waitForElasticsearch;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.emptyString;
import static org.junit.Assume.assumeTrue;
public class DockerTests extends PackagingTestCase {
protected DockerShell sh;
@BeforeClass
public static void filterDistros() {
assumeTrue("only Docker", distribution.packaging == Distribution.Packaging.DOCKER);
ensureImageIsLoaded(distribution);
}
@AfterClass
public static void cleanup() {
// runContainer also calls this, so we don't need this method to be annotated as `@After`
removeContainer();
}
@Before
public void setupTest() throws Exception {
sh = new DockerShell();
installation = runContainer(distribution());
}
/**
* Checks that the Docker image can be run, and that it passes various checks.
*/
public void test10Install() {
verifyContainerInstallation(installation, distribution());
}
/**
* Checks that no plugins are initially active.
*/
public void test20PluginsListWithNoPlugins() {
final Installation.Executables bin = installation.executables();
final Result r = sh.run(bin.elasticsearchPlugin + " list");
assertThat("Expected no plugins to be listed", r.stdout, emptyString());
}
/**
* Check that a keystore can be manually created using the provided CLI tool.
*/
public void test40CreateKeystoreManually() throws InterruptedException {
final Installation.Executables bin = installation.executables();
final Path keystorePath = installation.config("elasticsearch.keystore");
waitForPathToExist(keystorePath);
// Move the auto-created one out of the way, or else the CLI prompt asks us to confirm
sh.run("mv " + keystorePath + " " + keystorePath + ".bak");
sh.run(bin.elasticsearchKeystore + " create");
final Result r = sh.run(bin.elasticsearchKeystore + " list");
assertThat(r.stdout, containsString("keystore.seed"));
}
/**
* Send some basic index, count and delete requests, in order to check that the installation
* is minimally functional.
*/
public void test50BasicApiTests() throws Exception {
waitForElasticsearch(installation);
assertTrue(existsInContainer(installation.logs.resolve("gc.log")));
ServerUtils.runElasticsearchTests();
}
/**
* Check that the default keystore is automatically created
*/
public void test60AutoCreateKeystore() throws Exception {
final Path keystorePath = installation.config("elasticsearch.keystore");
waitForPathToExist(keystorePath);
assertPermissionsAndOwnership(keystorePath, p660);
final Installation.Executables bin = installation.executables();
final Result result = sh.run(bin.elasticsearchKeystore + " list");
assertThat(result.stdout, containsString("keystore.seed"));
}
/**
* Check that the default config can be overridden using a bind mount, and that env vars are respected
*/
public void test70BindMountCustomPathConfAndJvmOptions() throws Exception {
final Path tempConf = getTempDir().resolve("esconf-alternate");
try {
mkdir(tempConf);
copyFromContainer(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml"));
copyFromContainer(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties"));
// we have to disable Log4j from using JMX lest it hit a security
// manager exception before we have configured logging; this will fail
// startup since we detect usages of logging before it is configured
final String jvmOptions =
"-Xms512m\n" +
"-Xmx512m\n" +
"-Dlog4j2.disable.jmx=true\n";
append(tempConf.resolve("jvm.options"), jvmOptions);
// Make the temp directory and contents accessible when bind-mounted
Files.setPosixFilePermissions(tempConf, fromString("rwxrwxrwx"));
final Map<String, String> envVars = new HashMap<>();
envVars.put("ES_JAVA_OPTS", "-XX:-UseCompressedOops");
// Restart the container
removeContainer();
runContainer(distribution(), tempConf, envVars);
waitForElasticsearch(installation);
final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
assertThat(nodesResponse, containsString("\"heap_init_in_bytes\":536870912"));
assertThat(nodesResponse, containsString("\"using_compressed_ordinary_object_pointers\":\"false\""));
} finally {
rm(tempConf);
}
}
/**
* Check whether the elasticsearch-certutil tool has been shipped correctly,
* and if present then it can execute.
*/
public void test90SecurityCliPackaging() {
final Installation.Executables bin = installation.executables();
final Path securityCli = installation.lib.resolve("tools").resolve("security-cli");
if (distribution().isDefault()) {
assertTrue(existsInContainer(securityCli));
Result result = sh.run(bin.elasticsearchCertutil + " --help");
assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack"));
// Ensure that the exit code from the java command is passed back up through the shell script
result = sh.runIgnoreExitCode(bin.elasticsearchCertutil + " invalid-command");
assertThat(result.isSuccess(), is(false));
assertThat(result.stdout, containsString("Unknown command [invalid-command]"));
} else {
assertFalse(existsInContainer(securityCli));
}
}
/**
* Check that the elasticsearch-shard tool is shipped in the Docker image and is executable.
*/
public void test91ElasticsearchShardCliPackaging() {
final Installation.Executables bin = installation.executables();
final Result result = sh.run(bin.elasticsearchShard + " -h");
assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards"));
}
/**
* Check that the elasticsearch-node tool is shipped in the Docker image and is executable.
*/
public void test92ElasticsearchNodeCliPackaging() {
final Installation.Executables bin = installation.executables();
final Result result = sh.run(bin.elasticsearchNode + " -h");
assertThat(result.stdout,
containsString("A CLI tool to do unsafe cluster and index manipulations on current node"));
}
}
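
As a sketch of how further cases can hang off these helpers (the test name and the
probed file below are hypothetical, not part of this change):

    /**
     * Check that another expected file is present in the image.
     */
    public void test21ExampleFileCheck() {
        assertTrue(existsInContainer(installation.home.resolve("NOTICE.txt")));
    }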

View File

@@ -69,11 +69,11 @@ public abstract class PackagingTestCase extends Assert {
     protected static final String systemJavaHome;

     static {
         Shell sh = new Shell();
-        if (Platforms.LINUX) {
-            systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
-        } else {
-            assert Platforms.WINDOWS;
+        if (Platforms.WINDOWS) {
             systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim();
+        } else {
+            assert Platforms.LINUX || Platforms.DARWIN;
+            systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim();
         }
     }

View File

@@ -33,9 +33,16 @@ public class Distribution {
     public Distribution(Path path) {
         this.path = path;
         String filename = path.getFileName().toString();
-        int lastDot = filename.lastIndexOf('.');
-        String extension = filename.substring(lastDot + 1);
-        this.packaging = Packaging.valueOf(extension.equals("gz") ? "TAR" : extension.toUpperCase(Locale.ROOT));
+
+        if (filename.endsWith(".gz")) {
+            this.packaging = Packaging.TAR;
+        } else if (filename.endsWith(".docker.tar")) {
+            this.packaging = Packaging.DOCKER;
+        } else {
+            int lastDot = filename.lastIndexOf('.');
+            this.packaging = Packaging.valueOf(filename.substring(lastDot + 1).toUpperCase(Locale.ROOT));
+        }
+
         this.platform = filename.contains("windows") ? Platform.WINDOWS : Platform.LINUX;
         this.flavor = filename.contains("oss") ? Flavor.OSS : Flavor.DEFAULT;
         this.hasJdk = filename.contains("no-jdk") == false;
@@ -62,7 +69,8 @@ public class Distribution {
         TAR(".tar.gz", Platforms.LINUX || Platforms.DARWIN),
         ZIP(".zip", Platforms.WINDOWS),
         DEB(".deb", Platforms.isDPKG()),
-        RPM(".rpm", Platforms.isRPM());
+        RPM(".rpm", Platforms.isRPM()),
+        DOCKER(".docker.tar", Platforms.isDocker());

         /** The extension of this distribution's file */
         public final String extension;
View File

@@ -0,0 +1,359 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.elasticsearch.packaging.util.FileMatcher.p644;
import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileMatcher.p755;
import static org.elasticsearch.packaging.util.FileMatcher.p775;
import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
* Utilities for running packaging tests against the Elasticsearch Docker images.
*/
public class Docker {
private static final Log logger = LogFactory.getLog(Docker.class);
private static final Shell sh = new Shell();
private static final DockerShell dockerShell = new DockerShell();
/**
 * Tracks the currently running Docker container. An earlier implementation used a fixed container name,
* but that appeared to cause problems with repeatedly destroying and recreating containers with
* the same name.
*/
private static String containerId = null;
/**
* Checks whether the required Docker image exists. If not, the image is loaded from disk. No check is made
* to see whether the image is up-to-date.
* @param distribution details about the docker image to potentially load.
*/
public static void ensureImageIsLoaded(Distribution distribution) {
// `docker image ls` prints one repository name per matching image, so any
// non-empty output means the image is already present. Note that splitting
// empty output on "\n" still yields one (empty) element, so we can't just
// count lines here.
final String imageList = sh.run("docker image ls --format '{{.Repository}}' " + distribution.flavor.name).stdout;
if (imageList.trim().isEmpty() == false) {
return;
}
logger.info("Loading Docker image: " + distribution.path);
sh.run("docker load -i " + distribution.path);
}
/**
* Runs an Elasticsearch Docker container.
* @param distribution details about the docker image being tested.
*/
public static Installation runContainer(Distribution distribution) throws Exception {
return runContainer(distribution, null, Collections.emptyMap());
}
/**
* Runs an Elasticsearch Docker container, with options for overriding the config directory
* through a bind mount, and passing additional environment variables.
*
* @param distribution details about the docker image being tested.
* @param configPath the path to the config to bind mount, or null
* @param envVars environment variables to set when running the container
*/
public static Installation runContainer(Distribution distribution, Path configPath, Map<String,String> envVars) throws Exception {
removeContainer();
final List<String> args = new ArrayList<>();
args.add("docker run");
// Remove the container once it exits
args.add("--rm");
// Run the container in the background
args.add("--detach");
envVars.forEach((key, value) -> args.add("--env " + key + "=\"" + value + "\""));
// The container won't run without configuring discovery
args.add("--env discovery.type=single-node");
// Map ports in the container to the host, so that we can send requests
args.add("--publish 9200:9200");
args.add("--publish 9300:9300");
if (configPath != null) {
// Bind-mount the config dir, if specified
args.add("--volume \"" + configPath + ":/usr/share/elasticsearch/config\"");
}
args.add(distribution.flavor.name + ":test");
final String command = String.join(" ", args);
logger.debug("Running command: " + command);
containerId = sh.run(command).stdout.trim();
waitForElasticsearchToStart();
return Installation.ofContainer();
}
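// For reference, the assembled command resembles the following; the config path is
// illustrative, and the image tag is derived from distribution.flavor.name:
//
//   docker run --rm --detach \
//     --env discovery.type=single-node \
//     --publish 9200:9200 --publish 9300:9300 \
//     --volume "/tmp/esconf-alternate:/usr/share/elasticsearch/config" \
//     elasticsearch:test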
/**
* Waits for the Elasticsearch process to start executing in the container.
* This is called every time a container is started.
*/
private static void waitForElasticsearchToStart() throws InterruptedException {
boolean isElasticsearchRunning = false;
int attempt = 0;
String psOutput;
do {
// Give the container a chance to crash out
Thread.sleep(1000);
psOutput = dockerShell.run("ps ax").stdout;
if (psOutput.contains("/usr/share/elasticsearch/jdk/bin/java")) {
isElasticsearchRunning = true;
break;
}
} while (attempt++ < 5);
if (isElasticsearchRunning == false) {
final String dockerLogs = sh.run("docker logs " + containerId).stdout;
fail("Elasticsearch container did not start successfully.\n\n" + psOutput + "\n\n" + dockerLogs);
}
}
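// Note: the loop above polls `ps ax` once per second and gives up after six
// iterations, so a container gets roughly six seconds to start Elasticsearch.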
/**
* Removes the currently running container.
*/
public static void removeContainer() {
if (containerId != null) {
try {
// Remove the container, forcibly killing it if necessary
logger.debug("Removing container " + containerId);
final String command = "docker rm -f " + containerId;
final Shell.Result result = sh.runIgnoreExitCode(command);
if (result.isSuccess() == false) {
// I'm not sure why we're already removing this container, but that's OK.
if (result.stderr.contains("removal of container " + containerId + " is already in progress") == false) {
throw new RuntimeException(
"Command was not successful: [" + command + "] result: " + result.toString());
}
}
} finally {
// Null out the containerId under all circumstances, so that even if the remove command fails
// for some reason, the other tests will still proceed. Otherwise they can get stuck, continually
// trying to remove a non-existent container ID.
containerId = null;
}
}
}
/**
* Copies a file from the container into the local filesystem
* @param from the file to copy in the container
* @param to the location to place the copy
*/
public static void copyFromContainer(Path from, Path to) {
final String script = "docker cp " + containerId + ":" + from + " " + to;
logger.debug("Copying file from container with: " + script);
sh.run(script);
}
/**
* Extends {@link Shell} so that executed commands happen in the currently running Docker container.
*/
public static class DockerShell extends Shell {
@Override
protected String[] getScriptCommand(String script) {
assert containerId != null;
return super.getScriptCommand("docker exec " +
"--user elasticsearch:root " +
"--tty " +
containerId + " " +
script);
}
}
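// For example, dockerShell.run("ps ax") is roughly equivalent to running the
// following on the host, with <containerId> standing in for the current container:
//
//   docker exec --user elasticsearch:root --tty <containerId> ps ax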
/**
* Checks whether a path exists in the Docker container.
*/
public static boolean existsInContainer(Path path) {
logger.debug("Checking whether file " + path + " exists in container");
final Shell.Result result = dockerShell.runIgnoreExitCode("test -e " + path);
return result.isSuccess();
}
/**
* Checks that the specified path's permissions and ownership match those specified.
*/
public static void assertPermissionsAndOwnership(Path path, Set<PosixFilePermission> expectedPermissions) {
logger.debug("Checking permissions and ownership of [" + path + "]");
final String[] components = dockerShell.run("stat --format=\"%U %G %A\" " + path).stdout.split("\\s+");
final String username = components[0];
final String group = components[1];
final String permissions = components[2];
// The final substring() is because we don't check the directory bit, and we
// also don't want any SELinux security context indicator.
Set<PosixFilePermission> actualPermissions = fromString(permissions.substring(1, 10));
assertEquals("Permissions of " + path + " are wrong", actualPermissions, expectedPermissions);
assertThat("File owner of " + path + " is wrong", username, equalTo("elasticsearch"));
assertThat("File group of " + path + " is wrong", group, equalTo("root"));
}
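// Example: for a directory such as /usr/share/elasticsearch/config, the stat call
// prints something like "elasticsearch root drwxrwxr-x"; substring(1, 10) keeps the
// nine permission bits, dropping the leading file-type bit and anything after.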
/**
* Waits for up to 20 seconds for a path to exist in the container.
*/
public static void waitForPathToExist(Path path) throws InterruptedException {
int attempt = 0;
do {
if (existsInContainer(path)) {
return;
}
Thread.sleep(1000);
} while (attempt++ < 20);
fail(path + " failed to exist after 20 seconds");
}
/**
* Perform a variety of checks on an installation. If the current distribution is not OSS, additional checks are carried out.
*/
public static void verifyContainerInstallation(Installation installation, Distribution distribution) {
verifyOssInstallation(installation);
if (distribution.flavor == Distribution.Flavor.DEFAULT) {
verifyDefaultInstallation(installation);
}
}
private static void verifyOssInstallation(Installation es) {
dockerShell.run("id elasticsearch");
dockerShell.run("getent group elasticsearch");
final Shell.Result passwdResult = dockerShell.run("getent passwd elasticsearch");
final String homeDir = passwdResult.stdout.trim().split(":")[5];
assertThat(homeDir, equalTo("/usr/share/elasticsearch"));
Stream.of(
es.home,
es.data,
es.logs,
es.config
).forEach(dir -> assertPermissionsAndOwnership(dir, p775));
Stream.of(
es.plugins,
es.modules
).forEach(dir -> assertPermissionsAndOwnership(dir, p755));
// FIXME these files should all have the same permissions
Stream.of(
"elasticsearch.keystore",
// "elasticsearch.yml",
"jvm.options"
// "log4j2.properties"
).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p660));
Stream.of(
"elasticsearch.yml",
"log4j2.properties"
).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p644));
assertThat(
dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout,
containsString("keystore.seed"));
Stream.of(
es.bin,
es.lib
).forEach(dir -> assertPermissionsAndOwnership(dir, p755));
Stream.of(
"elasticsearch",
"elasticsearch-cli",
"elasticsearch-env",
"elasticsearch-enve",
"elasticsearch-keystore",
"elasticsearch-node",
"elasticsearch-plugin",
"elasticsearch-shard"
).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p755));
Stream.of(
"LICENSE.txt",
"NOTICE.txt",
"README.textile"
).forEach(doc -> assertPermissionsAndOwnership(es.home.resolve(doc), p644));
}
private static void verifyDefaultInstallation(Installation es) {
Stream.of(
"elasticsearch-certgen",
"elasticsearch-certutil",
"elasticsearch-croneval",
"elasticsearch-saml-metadata",
"elasticsearch-setup-passwords",
"elasticsearch-sql-cli",
"elasticsearch-syskeygen",
"elasticsearch-users",
"x-pack-env",
"x-pack-security-env",
"x-pack-watcher-env"
).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p755));
// at this time we only install the current version of archive distributions, but if that changes we'll need to pass
// the version through here
assertPermissionsAndOwnership(es.bin("elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), p755);
Stream.of(
"role_mapping.yml",
"roles.yml",
"users",
"users_roles"
).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p660));
}
}
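
Taken together, the tests drive these utilities in a fixed lifecycle. A minimal
sketch, assuming a Distribution pointing at a .docker.tar artifact (the path below
is hypothetical):

    Distribution distribution = new Distribution(Paths.get("elasticsearch-oss-7.5.0.docker.tar"));
    Docker.ensureImageIsLoaded(distribution);      // docker load, only if the image is absent
    Installation installation = Docker.runContainer(distribution);
    try {
        assertTrue(Docker.existsInContainer(installation.config("elasticsearch.yml")));
        Docker.verifyContainerInstallation(installation, distribution);
    } finally {
        Docker.removeContainer();                  // docker rm -f
    }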

View File

@@ -45,6 +45,7 @@ public class FileMatcher extends TypeSafeMatcher<Path> {
     public enum Fileness { File, Directory }

+    public static final Set<PosixFilePermission> p775 = fromString("rwxrwxr-x");
     public static final Set<PosixFilePermission> p755 = fromString("rwxr-xr-x");
     public static final Set<PosixFilePermission> p750 = fromString("rwxr-x---");
     public static final Set<PosixFilePermission> p660 = fromString("rw-rw----");

View File

@@ -84,6 +84,20 @@ public class Installation {
         );
     }

+    public static Installation ofContainer() {
+        String root = "/usr/share/elasticsearch";
+        return new Installation(
+            Paths.get(root),
+            Paths.get(root + "/config"),
+            Paths.get(root + "/data"),
+            Paths.get(root + "/logs"),
+            Paths.get(root + "/plugins"),
+            Paths.get(root + "/modules"),
+            null,
+            null
+        );
+    }
+
     public Path bin(String executableName) {
         return bin.resolve(executableName);
     }

View File

@@ -65,6 +65,10 @@ public class Platforms {
         return new Shell().runIgnoreExitCode("which service").isSuccess();
     }

+    public static boolean isDocker() {
+        return new Shell().runIgnoreExitCode("which docker").isSuccess();
+    }
+
     public static void onWindows(PlatformAction action) throws Exception {
         if (WINDOWS) {
             action.run();
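
Note that `which docker` only proves a client binary is on the PATH, not that a
usable daemon is reachable. A stricter probe could look like the following sketch
(an illustration, not part of this change):

    public static boolean isDockerDaemonReachable() {
        // `docker info` exits non-zero unless the daemon responds
        return new Shell().runIgnoreExitCode("docker info").isSuccess();
    }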

View File

@@ -93,7 +93,8 @@ public class Shell {
         String formattedCommand = String.format(Locale.ROOT, command, args);
         return run(formattedCommand);
     }

-    private String[] getScriptCommand(String script) {
+    protected String[] getScriptCommand(String script) {
         if (Platforms.WINDOWS) {
             return powershellCommand(script);
         } else {
@@ -102,11 +103,11 @@ public class Shell {
     }

     private static String[] bashCommand(String script) {
-        return Stream.concat(Stream.of("bash", "-c"), Stream.of(script)).toArray(String[]::new);
+        return new String[] { "bash", "-c", script };
     }

     private static String[] powershellCommand(String script) {
-        return Stream.concat(Stream.of("powershell.exe", "-Command"), Stream.of(script)).toArray(String[]::new);
+        return new String[] { "powershell.exe", "-Command", script };
     }

     private Result runScript(String[] command) {
View File

@@ -0,0 +1 @@
project.ext.shouldTestDocker = true

View File

@@ -0,0 +1 @@
project.ext.shouldTestDocker = true

View File

@@ -32,6 +32,8 @@ List projects = [
   'distribution:docker:oss-docker-build-context',
   'distribution:docker:oss-ubi-docker-build-context',
   'distribution:docker:ubi-docker-build-context',
+  'distribution:docker:oss-docker-export',
+  'distribution:docker:docker-export',
   'distribution:packages:oss-deb',
   'distribution:packages:deb',
   'distribution:packages:oss-no-jdk-deb',