Move classes from build scripts to buildSrc (#57197) (#57512)

* Move classes from build scripts to buildSrc

- move Run task
- move duplicate SanEvaluator

* Remove :run workaround

* Some little cleanup on build scripts on the way
Rene Groeschke 2020-06-02 15:33:53 +02:00 committed by GitHub
parent bd188f4a21
commit 8584da40af
7 changed files with 313 additions and 476 deletions
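
Most of the build-script cleanup below follows one pattern: eagerly created tasks (task foo(type: ...) { ... }) become lazily registered ones, task-to-task references switch from task objects to task names, and existing tasks get reconfigured through tasks.named(...). A minimal sketch of that pattern, using hypothetical copyDocs/buildDocs tasks rather than anything from this commit:

// Before: the task is created and configured on every build invocation,
// whether or not it ever runs
task copyDocs(type: Sync) {
    from 'docs'
    into "${buildDir}/docs-staging"
}
task buildDocs {
    dependsOn copyDocs                 // wires up the task object itself
}

// After: the task is only configured if something in the build actually needs it
tasks.register("copyDocs", Sync) {
    from 'docs'
    into "${buildDir}/docs-staging"
}
tasks.register("buildDocs") {
    dependsOn "copyDocs"               // wires up the task by name
}

// Further configuration of an already-registered task stays lazy as well
tasks.named("buildDocs").configure {
    doLast { println "staged docs in ${buildDir}/docs-staging" }
}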


@@ -47,6 +47,7 @@ apply from: 'gradle/forbidden-dependencies.gradle'
apply from: 'gradle/formatting.gradle'
apply from: 'gradle/local-distribution.gradle'
apply from: 'gradle/fips.gradle'
apply from: 'gradle/run.gradle'
// common maven publishing configuration
allprojects {
@@ -383,43 +384,6 @@ allprojects {
tasks.named('eclipse') { dependsOn 'cleanEclipse', 'copyEclipseSettings' }
}
// we need to add the same --debug-jvm option as
// the real RunTask has, so we can pass it through
class Run extends DefaultTask {
boolean debug = false
@Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
project.project(':distribution').run.debug = enabled
}
@Option(
option = "data-dir",
description = "Override the base data directory used by the testcluster"
)
public void setDataDir(String dataDirStr) {
project.project(':distribution').run.dataDir = dataDirStr
}
@Option(
option = "keystore-password",
description = "Set the elasticsearch keystore password"
)
public void setKeystorePassword(String password) {
project.project(':distribution').run.keystorePassword = password
}
}
tasks.register("run", Run) {
dependsOn ':distribution:run'
description = 'Runs elasticsearch in the foreground'
group = 'Verification'
impliesSubProjects = true
}
wrapper {
distributionType = 'ALL'
doLast {


@@ -189,10 +189,8 @@ if (project != rootProject) {
testingConventions.enabled = false
}
configurations {
distribution
reaper
}
configurations.register("distribution")
configurations.register("reaper")
dependencies {
reaper project('reaper')


@@ -0,0 +1,200 @@
package org.elasticsearch.gradle.network;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
/**
* A lazy evaluator to find the SAN (subject alternative name) to use for certificate generation.
*/
public class SanEvaluator {
private static String san = null;
public String toString() {
synchronized (SanEvaluator.class) {
if (san == null) {
san = getSubjectAlternativeNameString();
}
}
return san;
}
// Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
/**
* Return all interfaces (and subinterfaces) on the system
*/
private static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
}
});
return all;
}
/**
* Helper for getInterfaces, recursively adds subinterfaces to {@code target}
*/
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
}
}
}
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
try {
for (NetworkInterface intf : getInterfaces()) {
for (final InetAddress address : Collections.list(intf.getInetAddresses())) {
/*
* Some OS (e.g., BSD) assign a link-local address to the loopback interface.
* While technically not a loopback interface, some of these OS treat them as one (e.g., localhost on macOS),
* so we must too. Otherwise, things just won't work out of the box. So we include all addresses from
* loopback interfaces.
*
* By checking if the interface is a loopback interface or the address is a loopback address first,
* we avoid having to check if the interface is up unless necessary.
* This means we can avoid checking if the interface is up for virtual ethernet devices which have
* a tendency to disappear outside of our control (e.g., due to Docker).
*/
if ((intf.isLoopback() || address.isLoopbackAddress()) && isUp(intf, address)) {
list.add(address);
}
}
}
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address) address);
} else {
hostAddress = address.getHostAddress();
}
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
}
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
} catch (IOException e) {
throw new IllegalStateException("Cannot resolve alternative name string", e);
}
}
private static boolean isUp(final NetworkInterface intf, final InetAddress address) throws IOException {
try {
return intf.isUp();
} catch (final SocketException e) {
/*
* In Elasticsearch production code (NetworkUtils) we suppress this if the device is a virtual ethernet device.
* That should not happen here since the interface must be a loopback device or the address a loopback address
* to get here to begin with.
*/
assert intf.isLoopback() || address.isLoopbackAddress();
throw new IOException("failed to check if interface [" + intf.getName() + "] is up", e);
}
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
}
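
With the class now living in buildSrc, the two x-pack qa build scripts further down simply import it instead of declaring their own copy. A rough sketch of how a build script consumes it, assuming (as in the previously inlined code) that the evaluator is assigned to a san variable whose declaration falls outside the hunks shown below, and with the keytool arguments abbreviated:

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.info.BuildParams
import org.elasticsearch.gradle.network.SanEvaluator

// Lazy: the SAN string is only computed the first time toString() is called
def san = new SanEvaluator()

File keystoreDir = new File(project.buildDir, 'keystore')
File nodeKeystore = file("$keystoreDir/test-node.jks")

tasks.register("createNodeKeyStore", LoggedExec) {
    executable = "${BuildParams.runtimeJavaHome}/bin/keytool"
    args '-genkeypair',
            '-alias', 'test-node',
            '-keystore', nodeKeystore,
            '-dname', 'CN=smoke-test-plugins-ssl',
            '-keypass', 'keypass',
            '-storepass', 'keypass',
            '-ext', san.toString()   // e.g. "san=ip:127.0.0.1,dns:localhost"
}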


@@ -24,19 +24,19 @@ import org.elasticsearch.gradle.DependenciesInfoTask
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testclusters.RunTask
import java.nio.file.Files
import java.nio.file.Path
apply plugin: 'elasticsearch.testclusters'
plugins {
id 'base'
}
/*****************************************************************************
* Third party dependencies report *
*****************************************************************************/
// Concatenates the dependencies CSV files into a single file
task generateDependenciesReport(type: ConcatFilesTask) {
tasks.register("generateDependenciesReport", ConcatFilesTask) {
dependsOn rootProject.allprojects.collect { it.tasks.withType(DependenciesInfoTask) }
files = fileTree(dir: project.rootDir, include: '**/dependencies.csv')
headerLine = "name,version,url,license,sourceURL"
@@ -76,56 +76,56 @@ String defaultOutputs = 'build/outputs/default'
String systemdOutputs = 'build/outputs/systemd'
String transportOutputs = 'build/outputs/transport-only'
task processOssOutputs(type: Sync) {
tasks.register("processOssOutputs", Sync) {
into ossOutputs
}
task processDefaultOutputs(type: Sync) {
tasks.register("processDefaultOutputs", Sync) {
into defaultOutputs
from processOssOutputs
}
task processSystemdOutputs(type: Sync) {
tasks.register("processSystemdOutputs", Sync) {
into systemdOutputs
}
// Integ tests work over the rest http layer, so we need a transport included with the integ test zip.
// All transport modules are included so that they may be randomized for testing
task processTransportOutputs(type: Sync) {
tasks.register("processTransportOutputs", Sync) {
into transportOutputs
}
// these are dummy tasks that can be used to depend on the relevant sub output dir
task buildOssModules {
dependsOn processOssOutputs
tasks.register("buildOssModules") {
dependsOn "processOssOutputs"
outputs.dir "${ossOutputs}/modules"
}
task buildOssBin {
dependsOn processOssOutputs
tasks.register("buildOssBin") {
dependsOn "processOssOutputs"
outputs.dir "${ossOutputs}/bin"
}
task buildOssConfig {
dependsOn processOssOutputs
tasks.register("buildOssConfig") {
dependsOn "processOssOutputs"
outputs.dir "${ossOutputs}/config"
}
task buildDefaultModules {
dependsOn processDefaultOutputs
tasks.register("buildDefaultModules") {
dependsOn "processDefaultOutputs"
outputs.dir "${defaultOutputs}/modules"
}
task buildDefaultBin {
dependsOn processDefaultOutputs
tasks.register("buildDefaultBin") {
dependsOn "processDefaultOutputs"
outputs.dir "${defaultOutputs}/bin"
}
task buildDefaultConfig {
dependsOn processDefaultOutputs
tasks.register("buildDefaultConfig") {
dependsOn "processDefaultOutputs"
outputs.dir "${defaultOutputs}/config"
}
task buildSystemdModule {
dependsOn processSystemdOutputs
tasks.register("buildSystemdModule") {
dependsOn "processSystemdOutputs"
outputs.dir "${systemdOutputs}/modules"
}
task buildTransportModules {
dependsOn processTransportOutputs
tasks.register("buildTransportModules") {
dependsOn "processTransportOutputs"
outputs.dir "${transportOutputs}/modules"
}
@@ -156,14 +156,14 @@ void copyModule(Sync copyTask, Project module) {
}
// log4j config could be contained in modules, so we must join it together using these tasks
task buildOssLog4jConfig {
dependsOn processOssOutputs
tasks.register("buildOssLog4jConfig") {
dependsOn "processOssOutputs"
ext.contents = []
ext.log4jFile = file("${ossOutputs}/log4j2.properties")
outputs.file log4jFile
}
task buildDefaultLog4jConfig {
dependsOn processDefaultOutputs
tasks.register("buildDefaultLog4jConfig") {
dependsOn "processDefaultOutputs"
ext.contents = []
ext.log4jFile = file("${defaultOutputs}/log4j2.properties")
outputs.file log4jFile
@@ -176,8 +176,12 @@ Closure writeLog4jProperties = {
it.log4jFile.append(moduleLog4jProperties, 'UTF-8')
}
}
buildOssLog4jConfig.doLast(writeLog4jProperties)
buildDefaultLog4jConfig.doLast(writeLog4jProperties)
tasks.named("buildOssLog4jConfig").configure {
doLast(writeLog4jProperties)
}
tasks.named("buildDefaultLog4jConfig").configure {
doLast(writeLog4jProperties)
}
// copy log4j2.properties from modules that have it
void copyLog4jProperties(Task buildTask, Project module) {
@@ -238,12 +242,6 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule ->
copyModule(processSystemdOutputs, project(':modules:systemd'))
// make sure we have a clean task since we aren't a java project, but we have tasks that
// put stuff in the build dir
task clean(type: Delete) {
delete 'build'
}
configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
apply plugin: 'elasticsearch.jdk-download'
@@ -437,30 +435,6 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
}
}
testClusters {
runTask {
testDistribution = System.getProperty('run.distribution', 'default')
if (System.getProperty('run.distribution', 'default') == 'default') {
String licenseType = System.getProperty("run.license_type", "basic")
if (licenseType == 'trial') {
setting 'xpack.ml.enabled', 'true'
setting 'xpack.graph.enabled', 'true'
setting 'xpack.watcher.enabled', 'true'
setting 'xpack.license.self_generated.type', 'trial'
} else if (licenseType != 'basic') {
throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "]. Must be one of [basic] or [trial].")
}
setting 'xpack.security.enabled', 'true'
keystore 'bootstrap.password', 'password'
user username: 'elastic-admin', password: 'elastic-password', role: 'superuser'
}
}
}
task run(type: RunTask) {
useCluster testClusters.runTask;
}
/**
* Build some variables that are replaced in the packages. This includes both
* scripts like bin/elasticsearch and bin/elasticsearch-plugin that a user might run and also

gradle/run.gradle (new file, 47 lines)

@@ -0,0 +1,47 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.testclusters.RunTask
apply plugin: 'elasticsearch.testclusters'
testClusters {
runTask {
testDistribution = System.getProperty('run.distribution', 'default')
if (System.getProperty('run.distribution', 'default') == 'default') {
String licenseType = System.getProperty("run.license_type", "basic")
if (licenseType == 'trial') {
setting 'xpack.ml.enabled', 'true'
setting 'xpack.graph.enabled', 'true'
setting 'xpack.watcher.enabled', 'true'
setting 'xpack.license.self_generated.type', 'trial'
} else if (licenseType != 'basic') {
throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "]. Must be one of [basic] or [trial].")
}
setting 'xpack.security.enabled', 'true'
keystore 'bootstrap.password', 'password'
user username: 'elastic-admin', password: 'elastic-password', role: 'superuser'
}
}
}
tasks.register("run", RunTask) {
useCluster testClusters.runTask;
description = 'Runs elasticsearch in the foreground'
group = 'Verification'
}
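
The behaviour of the relocated run setup is unchanged: it is still driven by the same system properties, plus whatever command-line options RunTask itself exposes (the root-project Run class removed above existed only to forward --debug-jvm and friends to :distribution:run). Hedged usage examples:

./gradlew run                            # default distribution, basic self-generated license
./gradlew run -Drun.license_type=trial   # switches on the trial-license settings above
./gradlew run --debug-jvm                # RunTask option for attaching a debugger to elasticsearch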


@@ -1,4 +1,5 @@
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.network.SanEvaluator
import org.elasticsearch.gradle.info.BuildParams
import org.gradle.internal.jvm.Jvm
@@ -18,7 +19,7 @@ File keystoreDir = new File(project.buildDir, 'keystore')
// Generate the node's keystore
File nodeKeystore = file("$keystoreDir/test-node.jks")
task createNodeKeyStore(type: LoggedExec) {
tasks.register("createNodeKeyStore", LoggedExec) {
doFirst {
if (nodeKeystore.parentFile.exists() == false) {
nodeKeystore.parentFile.mkdirs()
@@ -38,12 +39,12 @@ task createNodeKeyStore(type: LoggedExec) {
'-dname', 'CN=' + host,
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
'-ext', san.toString()
}
// Generate the client's keystore
File clientKeyStore = file("$keystoreDir/test-client.jks")
task createClientKeyStore(type: LoggedExec) {
tasks.register("createClientKeyStore", LoggedExec) {
doFirst {
if (clientKeyStore.parentFile.exists() == false) {
clientKeyStore.parentFile.mkdirs()
@@ -63,13 +64,13 @@ task createClientKeyStore(type: LoggedExec) {
'-dname', 'CN=' + host,
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
'-ext', san.toString()
}
// Export the node's certificate
File nodeCertificate = file("$keystoreDir/test-node.cert")
task exportNodeCertificate(type: LoggedExec) {
dependsOn createNodeKeyStore
tasks.register("exportNodeCertificate", LoggedExec) {
dependsOn "createNodeKeyStore"
doFirst {
if (nodeCertificate.parentFile.exists() == false) {
nodeCertificate.parentFile.mkdirs()
@@ -87,8 +88,8 @@ task exportNodeCertificate(type: LoggedExec) {
}
// Import the node certificate in the client's keystore
task importNodeCertificateInClientKeyStore(type: LoggedExec) {
dependsOn createClientKeyStore, exportNodeCertificate
tasks.register("importNodeCertificateInClientKeyStore", LoggedExec) {
dependsOn "createClientKeyStore", "exportNodeCertificate"
executable = "${BuildParams.runtimeJavaHome}/bin/keytool"
args '-import',
'-alias', 'test-node',
@@ -100,8 +101,8 @@ task importNodeCertificateInClientKeyStore(type: LoggedExec) {
// Export the client's certificate
File clientCertificate = file("$keystoreDir/test-client.cert")
task exportClientCertificate(type: LoggedExec) {
dependsOn createClientKeyStore
tasks.register("exportClientCertificate", LoggedExec) {
dependsOn "createClientKeyStore"
doFirst {
if (clientCertificate.parentFile.exists() == false) {
clientCertificate.parentFile.mkdirs()
@@ -119,8 +120,8 @@ task exportClientCertificate(type: LoggedExec) {
}
// Import the client certificate in the node's keystore
task importClientCertificateInNodeKeyStore(type: LoggedExec) {
dependsOn createNodeKeyStore, exportClientCertificate
tasks.register("importClientCertificateInNodeKeyStore", LoggedExec) {
dependsOn "createNodeKeyStore", "exportClientCertificate"
executable = "${BuildParams.runtimeJavaHome}/bin/keytool"
args '-import',
'-alias', 'test-client',
@@ -136,10 +137,10 @@ forbiddenPatterns {
// Add keystores to test classpath: it expects it there
sourceSets.test.resources.srcDir(keystoreDir)
processTestResources.dependsOn(importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore)
processTestResources.dependsOn("importNodeCertificateInClientKeyStore", "importClientCertificateInNodeKeyStore")
integTest.runner {
dependsOn(importClientCertificateInNodeKeyStore)
dependsOn("importClientCertificateInNodeKeyStore")
onlyIf {
// Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail.
// TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores.
@@ -165,177 +166,3 @@ testClusters.integTest {
extraConfigFile nodeKeystore.name, nodeKeystore
extraConfigFile clientKeyStore.name, clientKeyStore
}
/** A lazy evaluator to find the san to use for certificate generation. */
class SanEvaluator {
private static String san = null
String toString() {
synchronized (SanEvaluator.class) {
if (san == null) {
san = getSubjectAlternativeNameString()
}
}
return san
}
// Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
/** Return all interfaces (and subinterfaces) on the system */
private static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
}
});
return all;
}
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
}
}
}
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
for (final InetAddress address : Collections.list(intf.getInetAddresses())) {
/*
* Some OS (e.g., BSD) assign a link-local address to the loopback interface. While technically not a loopback interface, some of
* these OS treat them as one (e.g., localhost on macOS), so we must too. Otherwise, things just won't work out of the box. So we
* include all addresses from loopback interfaces.
*
* By checking if the interface is a loopback interface or the address is a loopback address first, we avoid having to check if the
* interface is up unless necessary. This means we can avoid checking if the interface is up for virtual ethernet devices which have
* a tendency to disappear outside of our control (e.g., due to Docker).
*/
if ((intf.isLoopback() || address.isLoopbackAddress()) && isUp(intf, address)) {
list.add(address)
}
}
}
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address) address);
} else {
hostAddress = address.getHostAddress();
}
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
}
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
}
private static boolean isUp(final NetworkInterface intf, final InetAddress address) throws IOException {
try {
return intf.isUp();
} catch (final SocketException e) {
/*
* In Elasticsearch production code (NetworkUtils) we suppress this if the device is a virtual ethernet device. That should not happen
* here since the interface must be a loopback device or the address a loopback address to get here to begin with.
*/
assert intf.isLoopback() || address.isLoopbackAddress()
throw new IOException("failed to check if interface [" + intf.getName() + "] is up", e)
}
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
}


@@ -1,4 +1,5 @@
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.network.SanEvaluator
import org.elasticsearch.gradle.info.BuildParams
import org.gradle.internal.jvm.Jvm
@@ -15,7 +16,7 @@ File keystoreDir = new File(project.buildDir, 'keystore')
// Generate the node's keystore
File nodeKeystore = file("$keystoreDir/test-node.jks")
task createNodeKeyStore(type: LoggedExec) {
tasks.register("createNodeKeyStore", LoggedExec) {
doFirst {
if (nodeKeystore.parentFile.exists() == false) {
nodeKeystore.parentFile.mkdirs()
@@ -35,12 +36,12 @@ task createNodeKeyStore(type: LoggedExec) {
'-dname', 'CN=smoke-test-plugins-ssl',
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
'-ext', san.toString()
}
// Generate the client's keystore
File clientKeyStore = file("$keystoreDir/test-client.jks")
task createClientKeyStore(type: LoggedExec) {
tasks.register("createClientKeyStore", LoggedExec) {
doFirst {
if (clientKeyStore.parentFile.exists() == false) {
clientKeyStore.parentFile.mkdirs()
@@ -60,13 +61,13 @@ task createClientKeyStore(type: LoggedExec) {
'-dname', 'CN=smoke-test-plugins-ssl',
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
'-ext', san.toString()
}
// Export the node's certificate
File nodeCertificate = file("$keystoreDir/test-node.cert")
task exportNodeCertificate(type: LoggedExec) {
dependsOn createNodeKeyStore
tasks.register("exportNodeCertificate", LoggedExec) {
dependsOn "createNodeKeyStore"
doFirst {
if (nodeCertificate.parentFile.exists() == false) {
nodeCertificate.parentFile.mkdirs()
@@ -84,8 +85,8 @@ task exportNodeCertificate(type: LoggedExec) {
}
// Import the node certificate in the client's keystore
task importNodeCertificateInClientKeyStore(type: LoggedExec) {
dependsOn createClientKeyStore, exportNodeCertificate
tasks.register("importNodeCertificateInClientKeyStore", LoggedExec) {
dependsOn "createClientKeyStore", "exportNodeCertificate"
executable = "${BuildParams.runtimeJavaHome}/bin/keytool"
args '-import',
'-alias', 'test-node',
@@ -97,8 +98,8 @@ task importNodeCertificateInClientKeyStore(type: LoggedExec) {
// Export the client's certificate
File clientCertificate = file("$keystoreDir/test-client.cert")
task exportClientCertificate(type: LoggedExec) {
dependsOn createClientKeyStore
tasks.register("exportClientCertificate", LoggedExec) {
dependsOn "createClientKeyStore"
doFirst {
if (clientCertificate.parentFile.exists() == false) {
clientCertificate.parentFile.mkdirs()
@@ -116,8 +117,8 @@ task exportClientCertificate(type: LoggedExec) {
}
// Import the client certificate in the node's keystore
task importClientCertificateInNodeKeyStore(type: LoggedExec) {
dependsOn createNodeKeyStore, exportClientCertificate
tasks.register("importClientCertificateInNodeKeyStore", LoggedExec) {
dependsOn "createNodeKeyStore", "exportClientCertificate"
executable = "${BuildParams.runtimeJavaHome}/bin/keytool"
args '-import',
'-alias', 'test-client',
@@ -133,7 +134,7 @@ forbiddenPatterns {
// Add keystores to test classpath: it expects it there
sourceSets.test.resources.srcDir(keystoreDir)
processTestResources.dependsOn(importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore)
processTestResources.dependsOn("importNodeCertificateInClientKeyStore", "importClientCertificateInNodeKeyStore")
integTest.runner {
dependsOn(importClientCertificateInNodeKeyStore)
@@ -162,177 +163,3 @@ testClusters.integTest {
extraConfigFile nodeKeystore.name, nodeKeystore
extraConfigFile clientKeyStore.name, clientKeyStore
}
/** A lazy evaluator to find the san to use for certificate generation. */
class SanEvaluator {
private static String san = null
String toString() {
synchronized (SanEvaluator.class) {
if (san == null) {
san = getSubjectAlternativeNameString()
}
}
return san
}
// Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
/** Return all interfaces (and subinterfaces) on the system */
private static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
}
});
return all;
}
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
}
}
}
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
for (final InetAddress address : Collections.list(intf.getInetAddresses())) {
/*
* Some OS (e.g., BSD) assign a link-local address to the loopback interface. While technically not a loopback interface, some of
* these OS treat them as one (e.g., localhost on macOS), so we must too. Otherwise, things just won't work out of the box. So we
* include all addresses from loopback interfaces.
*
* By checking if the interface is a loopback interface or the address is a loopback address first, we avoid having to check if the
* interface is up unless necessary. This means we can avoid checking if the interface is up for virtual ethernet devices which have
* a tendency to disappear outside of our control (e.g., due to Docker).
*/
if ((intf.isLoopback() || address.isLoopbackAddress()) && isUp(intf, address)) {
list.add(address)
}
}
}
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address) address);
} else {
hostAddress = address.getHostAddress();
}
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
}
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
}
private static boolean isUp(final NetworkInterface intf, final InetAddress address) throws IOException {
try {
return intf.isUp();
} catch (final SocketException e) {
/*
* In Elasticsearch production code (NetworkUtils) we suppress this if the device is a virtual ethernet device. That should not happen
* here since the interface must be a loopback device or the address a loopback address to get here to begin with.
*/
assert intf.isLoopback() || address.isLoopbackAddress()
throw new IOException("failed to check if interface [" + intf.getName() + "] is up", e)
}
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
}