// OpenSearch/qa/smoke-test-plugins-ssl/build.gradle

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.NodeInfo
import javax.net.ssl.HttpsURLConnection
import javax.net.ssl.KeyManagerFactory
import javax.net.ssl.SSLContext
import javax.net.ssl.TrustManagerFactory
import java.nio.charset.StandardCharsets
import java.security.KeyStore
import java.security.SecureRandom
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
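// These plugins stand up the integration test cluster configured below (integTestCluster) and run
// the project's YAML REST tests against it.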
dependencies {
testCompile project(path: xpackModule('core'), configuration: 'runtime')
}
String outputDir = "generated-resources/${project.name}"
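// Copy the X-Pack plugin metadata into generated-resources and add it to the test output,
// so it ends up on the test classpath alongside the other test resources.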
task copyXPackPluginProps(type: Copy) {
from project(xpackModule('core')).file('src/main/plugin-metadata')
from project(xpackModule('core')).tasks.pluginProperties
into outputDir
}
project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
// needed so the generated certificates are consistent with SSL hostname verification
Object san = new SanEvaluator()
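// The tasks below generate two JKS keystores (one for the node, one for the test client) and then
// cross-import each side's certificate into the other keystore, so the node and client trust each other.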
// location of generated keystores and certificates
File keystoreDir = new File(project.buildDir, 'keystore')
// Generate the node's keystore
File nodeKeystore = new File(keystoreDir, 'test-node.jks')
task createNodeKeyStore(type: LoggedExec) {
doFirst {
if (nodeKeystore.parentFile.exists() == false) {
nodeKeystore.parentFile.mkdirs()
}
if (nodeKeystore.exists()) {
delete nodeKeystore
}
}
executable = new File(project.runtimeJavaHome, 'bin/keytool')
standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
args '-genkey',
'-alias', 'test-node',
'-keystore', nodeKeystore,
'-keyalg', 'RSA',
'-keysize', '2048',
'-validity', '712',
'-dname', 'CN=smoke-test-plugins-ssl',
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
}
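// For reference, the task above is roughly equivalent to running the following (the SAN extension is
// computed per host and may expand to something like san=ip:127.0.0.1,dns:localhost):
//   keytool -genkey -alias test-node -keystore build/keystore/test-node.jks -keyalg RSA -keysize 2048 \
//     -validity 712 -dname CN=smoke-test-plugins-ssl -keypass keypass -storepass keypass -ext <san>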
// Generate the client's keystore
File clientKeyStore = new File(keystoreDir, 'test-client.jks')
task createClientKeyStore(type: LoggedExec) {
doFirst {
if (clientKeyStore.parentFile.exists() == false) {
clientKeyStore.parentFile.mkdirs()
}
if (clientKeyStore.exists()) {
delete clientKeyStore
}
}
executable = new File(project.runtimeJavaHome, 'bin/keytool')
standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
args '-genkey',
'-alias', 'test-client',
'-keystore', clientKeyStore,
'-keyalg', 'RSA',
'-keysize', '2048',
'-validity', '712',
'-dname', 'CN=smoke-test-plugins-ssl',
'-keypass', 'keypass',
'-storepass', 'keypass',
'-ext', san
}
// Export the node's certificate
File nodeCertificate = new File(keystoreDir, 'test-node.cert')
task exportNodeCertificate(type: LoggedExec) {
dependsOn createNodeKeyStore
doFirst {
if (nodeCertificate.parentFile.exists() == false) {
nodeCertificate.parentFile.mkdirs()
}
if (nodeCertificate.exists()) {
delete nodeCertificate
}
}
executable = new File(project.runtimeJavaHome, 'bin/keytool')
args '-export',
'-alias', 'test-node',
'-keystore', nodeKeystore,
'-storepass', 'keypass',
'-file', nodeCertificate
}
// Import the node certificate into the client's keystore
task importNodeCertificateInClientKeyStore(type: LoggedExec) {
dependsOn exportNodeCertificate
executable = new File(project.runtimeJavaHome, 'bin/keytool')
args '-import',
'-alias', 'test-node',
'-keystore', clientKeyStore,
'-storepass', 'keypass',
'-file', nodeCertificate,
'-noprompt'
}
// Export the client's certificate
File clientCertificate = new File(keystoreDir, 'test-client.cert')
task exportClientCertificate(type: LoggedExec) {
dependsOn createClientKeyStore
doFirst {
if (clientCertificate.parentFile.exists() == false) {
clientCertificate.parentFile.mkdirs()
}
if (clientCertificate.exists()) {
delete clientCertificate
}
}
executable = new File(project.runtimeJavaHome, 'bin/keytool')
args '-export',
'-alias', 'test-client',
'-keystore', clientKeyStore,
'-storepass', 'keypass',
'-file', clientCertificate
}
// Import the client certificate into the node's keystore
task importClientCertificateInNodeKeyStore(type: LoggedExec) {
dependsOn exportClientCertificate
executable = new File(project.runtimeJavaHome, 'bin/keytool')
args '-import',
'-alias', 'test-client',
'-keystore', nodeKeystore,
'-storepass', 'keypass',
'-file', clientCertificate,
'-noprompt'
}
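// At this point each keystore holds its own private key plus the other side's certificate, giving the
// node and the test client mutual trust over TLS.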
forbiddenPatterns {
exclude '**/*.cert'
}
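// the .cert files exported above are binary, which is why they are excluded from this source pattern check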
// Add the keystores to the test classpath: the tests expect to find them there
sourceSets.test.resources.srcDir(keystoreDir)
processTestResources.dependsOn(
createNodeKeyStore, createClientKeyStore,
importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore
)
integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCertificateInClientKeyStore)
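// Count the plugins installed on the cluster; the total is substituted into the YAML tests via the
// expected.plugins.count expansion defined further below.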
ext.pluginsCount = 0
project(xpackProject('plugin').path).subprojects { Project p ->
// the meta plugin contains the individual xpack plugins
if (p.extensions.findByName('esplugin') != null) {
pluginsCount += 1
}
}
project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
// need to get a non-decorated project object, so must re-lookup the project by path
integTestCluster.plugin(subproj.path)
pluginsCount += 1
}
integTestCluster {
setting 'xpack.monitoring.collection.interval', '1s'
setting 'xpack.monitoring.exporters._http.type', 'http'
setting 'xpack.monitoring.exporters._http.enabled', 'false'
setting 'xpack.monitoring.exporters._http.ssl.truststore.path', clientKeyStore.name
setting 'xpack.monitoring.exporters._http.ssl.truststore.password', 'keypass'
setting 'xpack.monitoring.exporters._http.auth.username', 'monitoring_agent'
setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password'
setting 'xpack.monitoring.exporters._http.ssl.verification_mode', 'full'
setting 'xpack.security.http.ssl.enabled', 'true'
setting 'xpack.security.http.ssl.keystore.path', nodeKeystore.name
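// the keystore password is a secure setting, so it goes into the node's Elasticsearch keystore rather than elasticsearch.yml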
keystoreSetting 'xpack.security.http.ssl.keystore.secure_password', 'keypass'
setting 'xpack.ml.enabled', 'false'
plugin xpackProject('plugin').path
// copy keystores into config/
extraConfigFile nodeKeystore.name, nodeKeystore
extraConfigFile clientKeyStore.name, clientKeyStore
setupCommand 'setupTestUser',
'bin/x-pack/users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
setupCommand 'setupMonitoringUser',
'bin/x-pack/users', 'useradd', 'monitoring_agent', '-p', 'x-pack-test-password', '-r', 'remote_monitoring_agent'
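// The default wait condition cannot be used here: the cluster only answers over HTTPS with the
// self-signed node certificate and requires authentication. Instead, build an SSLContext from the
// client keystore (which trusts the node certificate imported above) and poll cluster health as test_user.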
waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
KeyStore keyStore = KeyStore.getInstance("JKS");
keyStore.load(clientKeyStore.newInputStream(), 'keypass'.toCharArray());
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
kmf.init(keyStore, 'keypass'.toCharArray());
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(keyStore);
SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
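// poll up to 10 times, 500ms apart; on a 200 response write the health output to wait.success,
// whose existence is what this closure ultimately reports back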
for (int i = 0; i < 10; i++) {
// we use custom wait logic here for HTTPS
HttpsURLConnection httpURLConnection = null;
try {
httpURLConnection = (HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection();
httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory());
httpURLConnection.setRequestProperty("Authorization", "Basic " +
Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
httpURLConnection.setRequestMethod("GET");
httpURLConnection.connect();
if (httpURLConnection.getResponseCode() == 200) {
tmpFile.withWriter StandardCharsets.UTF_8.name(), {
it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name()))
}
}
} catch (IOException e) {
if (i == 9) {
logger.error("final attempt of calling cluster health failed", e)
} else {
logger.debug("failed to call cluster health", e)
}
} finally {
if (httpURLConnection != null) {
httpURLConnection.disconnect();
}
}
// did not start, so wait a bit before trying again
Thread.sleep(500L);
}
return tmpFile.exists()
}
}
ext.expansions = [
'expected.plugins.count': pluginsCount
]
processTestResources {
from(sourceSets.test.resources.srcDirs) {
include '**/*.yml'
inputs.properties(expansions)
MavenFilteringHack.filter(it, expansions)
}
}
/** A lazy evaluator to find the san to use for certificate generation. */
class SanEvaluator {
private static String san = null
String toString() {
synchronized (SanEvaluator.class) {
if (san == null) {
san = getSubjectAlternativeNameString()
}
}
return san
}
// Code adapted from NetworkUtils/InetAddresses/NetworkAddress to support SAN generation
/** Return all interfaces (and subinterfaces) on the system */
private static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
}
});
return all;
}
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
}
}
}
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
if (intf.isUp()) {
// NOTE: some operating systems (e.g. the BSD stack) assign a link-local address to the loopback interface.
// While technically not a loopback address, some systems treat it as one (e.g. OS X "localhost"), so we
// must too, otherwise things just won't work out of the box. Hence we include all addresses from loopback interfaces.
for (InetAddress address : Collections.list(intf.getInetAddresses())) {
if (intf.isLoopback() || address.isLoopbackAddress()) {
list.add(address);
}
}
}
}
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address)address);
} else {
hostAddress = address.getHostAddress();
}
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
}
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
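// e.g. the loopback address ::1 arrives as the hextets [0,0,0,0,0,0,0,1]; the run of zeroes is
// collapsed below and the address renders as "::1"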
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
}