OpenSearch/distribution/docker/build.gradle

import org.elasticsearch.gradle.Architecture
import org.elasticsearch.gradle.DockerBase
import org.elasticsearch.gradle.ElasticsearchDistribution.Flavor
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.docker.DockerBuildTask
import org.elasticsearch.gradle.info.BuildParams
import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.test.fixtures'
apply plugin: 'elasticsearch.internal-distribution-download'
apply plugin: 'elasticsearch.rest-resources'

testFixtures.useFixture()

configurations {
  aarch64DockerSource
  dockerSource
  aarch64OssDockerSource
  ossDockerSource
}

dependencies {
  aarch64DockerSource project(path: ":distribution:archives:linux-aarch64-tar", configuration:"default")
  dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default")
  aarch64OssDockerSource project(path: ":distribution:archives:oss-linux-aarch64-tar", configuration:"default")
  ossDockerSource project(path: ":distribution:archives:oss-linux-tar", configuration:"default")
}

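// The closure below builds the map of values substituted into src/docker/Dockerfile via
// Gradle's expand() (Groovy SimpleTemplateEngine), so the template is assumed to contain
// placeholders such as ${base_image} matching the keys returned here.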
ext.expansions = { Architecture architecture, boolean oss, DockerBase base, boolean local ->
  String classifier
  if (local) {
    if (architecture == Architecture.AARCH64) {
      classifier = "linux-aarch64"
    } else if (architecture == Architecture.X64) {
      classifier = "linux-x86_64"
    } else {
      throw new IllegalArgumentException("Unsupported architecture [" + architecture + "]")
    }
  } else {
    /* When sourcing the Elasticsearch build remotely, the same Dockerfile needs
     * to be able to fetch the artifact for any supported platform. We can't make
     * the decision here. Bash will interpolate the `arch` command for us. */
    classifier = "linux-\$(arch)"
  }

  final String elasticsearch = "elasticsearch-${oss ? 'oss-' : ''}${VersionProperties.elasticsearch}-${classifier}.tar.gz"

  /* Both the following Dockerfile commands put the resulting artifact at
   * the same location, regardless of classifier, so that the commands that
   * follow in the Dockerfile don't have to know about the runtime
   * architecture. */
  String sourceElasticsearch
  if (local) {
    sourceElasticsearch = "COPY $elasticsearch /opt/elasticsearch.tar.gz"
  } else {
    sourceElasticsearch = """
RUN curl --retry 8 -S -L \\
--output /opt/elasticsearch.tar.gz \\
https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/$elasticsearch
"""
  }

  return [
    'base_image'          : base.getImage(),
    'build_date'          : BuildParams.buildDate,
    'git_revision'        : BuildParams.gitRevision,
    'license'             : oss ? 'Apache-2.0' : 'Elastic-License',
    'package_manager'     : base == DockerBase.UBI ? 'microdnf' : 'yum',
    'source_elasticsearch': sourceElasticsearch,
    'docker_base'         : base.name().toLowerCase(),
    'version'             : VersionProperties.elasticsearch
  ]
}

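// Helpers for naming the per-variant build directories and tasks. For example,
// buildPath(Architecture.AARCH64, true, DockerBase.CENTOS) resolves to 'build/aarch64-oss-docker',
// and taskName("build", Architecture.AARCH64, true, DockerBase.CENTOS, "DockerImage") to
// 'buildAarch64OssDockerImage'.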
private static String buildPath(Architecture architecture, boolean oss, DockerBase base) {
  return 'build/' +
    (architecture == Architecture.AARCH64 ? 'aarch64-' : '') +
    (oss ? 'oss-' : '') +
    (base == DockerBase.UBI ? 'ubi-' : '') +
    'docker'
}

private static String taskName(String prefix, Architecture architecture, boolean oss, DockerBase base, String suffix) {
  return prefix +
    (architecture == Architecture.AARCH64 ? 'Aarch64' : '') +
    (oss ? 'Oss' : '') +
    (base == DockerBase.UBI ? 'Ubi' : '') +
    suffix
}

project.ext {
  dockerBuildContext = { Architecture architecture, boolean oss, DockerBase base, boolean local ->
    copySpec {
      into('bin') {
        from project.projectDir.toPath().resolve("src/docker/bin")
      }
      into('config') {
        /*
         * The OSS and default distributions have different configurations, therefore we want
         * to allow overriding the default configuration from files in the 'oss' sub-directory.
         * We don't want the 'oss' sub-directory to appear in the final build context, however.
         */
        duplicatesStrategy = DuplicatesStrategy.EXCLUDE
        from(project.projectDir.toPath().resolve("src/docker/config")) {
          exclude 'oss'
        }
        if (oss) {
          // Overlay the config file
          from project.projectDir.toPath().resolve("src/docker/config/oss")
        }
      }
      from(project.projectDir.toPath().resolve("src/docker/Dockerfile")) {
        expand(expansions(architecture, oss, base, local))
      }
    }
  }
}

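// Registers a Sync task (e.g. copyOssDockerContext) that stages the full Docker build context:
// the bin/ and config/ files, the expanded Dockerfile, and the locally built distribution
// tarball. 'build_date' is excluded from the task inputs, presumably so that a changing
// timestamp alone does not invalidate the task's up-to-date state.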
void addCopyDockerContextTask(Architecture architecture, boolean oss, DockerBase base) {
  if (oss && base != DockerBase.CENTOS) {
    throw new GradleException("The only allowed docker base image for OSS builds is CENTOS")
  }

  tasks.register(taskName("copy", architecture, oss, base, "DockerContext"), Sync) {
    expansions(architecture, oss, base, true).findAll { it.key != 'build_date' }.each { k, v ->
      inputs.property(k, { v.toString() })
    }
    into buildPath(architecture, oss, base)
    with dockerBuildContext(architecture, oss, base, true)

    if (architecture == Architecture.AARCH64) {
      if (oss) {
        from configurations.aarch64OssDockerSource
      } else {
        from configurations.aarch64DockerSource
      }
    } else {
      if (oss) {
        from configurations.ossDockerSource
      } else {
        from configurations.dockerSource
      }
    }
  }
}

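// Creates the given directories if needed and makes them world-writable, so that the containers
// started by the docker-compose test fixture can write to them.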
def createAndSetWritable(Object... locations) {
  locations.each { location ->
    File file = file(location)
    file.mkdirs()
    file.setWritable(true, false)
  }
}

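// Stages the test TLS keystore under build/certs, presumably so the compose fixture can mount
// it into the containers for the security-enabled tests.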
tasks.register("copyKeystore", Sync) {
from project(':x-pack:plugin:core')
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')
into "${buildDir}/certs"
doLast {
file("${buildDir}/certs").setReadable(true, false)
file("${buildDir}/certs/testnode.jks").setReadable(true, false)
}
}
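// The block below registers docker distributions for every architecture/flavor combination,
// such as docker_default and docker_oss plus their _aarch64 variants; preProcessFixture
// depends on the first two.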
elasticsearch_distributions {
  Architecture.values().each { eachArchitecture ->
    Flavor.values().each { distroFlavor ->
      "docker_$distroFlavor${ eachArchitecture == Architecture.AARCH64 ? '_aarch64' : '' }" {
        architecture = eachArchitecture
        flavor = distroFlavor
        type = 'docker'
        version = VersionProperties.getElasticsearch()
        failIfUnavailable = false // This ensures we don't attempt to build images if docker is unavailable
      }
    }
  }
}

tasks.named("preProcessFixture").configure {
dependsOn elasticsearch_distributions.docker_default, elasticsearch_distributions.docker_oss
dependsOn "copyKeystore"
doLast {
// tests expect to have an empty repo
project.delete(
"${buildDir}/repo",
"${buildDir}/oss-repo"
)
createAndSetWritable(
"${buildDir}/repo",
"${buildDir}/oss-repo",
"${buildDir}/logs/default-1",
"${buildDir}/logs/default-2",
"${buildDir}/logs/oss-1",
"${buildDir}/logs/oss-2"
)
}
}
tasks.named("processTestResources").configure {
from project(':x-pack:plugin:core')
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')
}
tasks.register("integTest", Test) {
outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true }
maxParallelForks = '1'
include '**/*IT.class'
}
tasks.named("check").configure {
dependsOn "integTest"
}
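// Registers a DockerBuildTask (e.g. buildUbiDockerImage) that builds the image from the staged
// context and applies the release and ':test' tags; onlyIf restricts each task to hosts of the
// matching architecture.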
void addBuildDockerImage(Architecture architecture, boolean oss, DockerBase base) {
  if (oss && base != DockerBase.CENTOS) {
    throw new GradleException("The only allowed docker base image for OSS builds is CENTOS")
  }

  final TaskProvider<DockerBuildTask> buildDockerImageTask =
    tasks.register(taskName("build", architecture, oss, base, "DockerImage"), DockerBuildTask) {
      onlyIf { Architecture.current() == architecture }
      TaskProvider<Sync> copyContextTask = tasks.named(taskName("copy", architecture, oss, base, "DockerContext"))
      dependsOn(copyContextTask)
      dockerContext.fileProvider(copyContextTask.map { it.destinationDir })

      String version = VersionProperties.elasticsearch
      if (oss) {
        tags = [
          "docker.elastic.co/elasticsearch/elasticsearch-oss:${version}",
          "elasticsearch-oss:test"
        ]
      } else {
        String suffix = base == DockerBase.UBI ? '-ubi8' : ''
        tags = [
          "elasticsearch${suffix}:${version}",
          "docker.elastic.co/elasticsearch/elasticsearch${suffix}:${version}",
          "docker.elastic.co/elasticsearch/elasticsearch-full${suffix}:${version}",
          "elasticsearch${suffix}:test",
        ]
      }
    }

  tasks.named("assemble").configure {
    dependsOn(buildDockerImageTask)
  }
}

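// Generate a copy-context/build-image task pair for every supported combination, e.g.
// copyDockerContext/buildDockerImage, copyOssDockerContext/buildOssDockerImage,
// copyUbiDockerContext/buildUbiDockerImage, and the Aarch64 variants such as
// buildAarch64OssDockerImage.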
for (final Architecture architecture : Architecture.values()) {
  for (final DockerBase base : DockerBase.values()) {
    for (final boolean oss : [false, true]) {
      if (oss && base != DockerBase.CENTOS) {
        // We only create Docker images for the OSS distribution on CentOS.
        // Other bases only use the default distribution.
        continue
      }
      addCopyDockerContextTask(architecture, oss, base)
      addBuildDockerImage(architecture, oss, base)
    }
  }
}

// We build the images used in compose locally, but the pull command insists on using a
// repository, so we disable it to prevent that. Everything compose needs is still available,
// since the local images are built as part of a pull.
if (tasks.findByName("composePull")) {
  tasks.composePull.enabled = false
}

/*
 * The export subprojects write out the generated Docker images to disk, so
 * that they can be easily reloaded, for example into a VM for distribution testing.
 */
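// For an export subproject whose name contains none of 'aarch64-', 'oss-' or 'ubi-', for example,
// this registers exportDockerImage, which runs `docker save` on elasticsearch:test and writes
// build/elasticsearch_test_<version>.docker.tar under the parent project's build directory.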
subprojects { Project subProject ->
  if (subProject.name.endsWith('-export')) {
    apply plugin: 'distribution'

    final Architecture architecture = subProject.name.contains('aarch64-') ? Architecture.AARCH64 : Architecture.X64
    final boolean oss = subProject.name.contains('oss-')
    final DockerBase base = subProject.name.contains('ubi-') ? DockerBase.UBI : DockerBase.CENTOS
    final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
    final String suffix = oss ? '-oss' : base == DockerBase.UBI ? '-ubi8' : ''
    final String extension = base == DockerBase.UBI ? 'ubi.tar' : 'docker.tar'
    final String artifactName = "elasticsearch${arch}${suffix}_test"

    final String exportTaskName = taskName("export", architecture, oss, base, "DockerImage")
    final String buildTaskName = taskName("build", architecture, oss, base, "DockerImage")
    final String tarFile = "${parent.projectDir}/build/${artifactName}_${VersionProperties.elasticsearch}.${extension}"

    tasks.register(exportTaskName, LoggedExec) {
      inputs.file("${parent.projectDir}/build/markers/${buildTaskName}.marker")
      executable 'docker'
      outputs.file(tarFile)
      args "save",
        "-o",
        tarFile,
        "elasticsearch${suffix}:test"

      dependsOn(parent.path + ":" + buildTaskName)
      onlyIf { Architecture.current() == architecture }
    }

    artifacts.add('default', file(tarFile)) {
      type 'tar'
      name artifactName
      builtBy exportTaskName
    }

    tasks.named("assemble").configure {
      dependsOn(exportTaskName)
    }
  }
}