// x-pack/plugin/build.gradle

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.test.NodeInfo

import java.nio.charset.StandardCharsets

apply plugin: 'elasticsearch.testclusters'
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

archivesBaseName = 'x-pack'

dependencies {
  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
}
subprojects {
  afterEvaluate {
    if (project.plugins.hasPlugin(PluginBuildPlugin)) {
      // see the root Gradle file for additional logic regarding this configuration
      project.configurations.create('featureAwarePlugin')
      project.dependencies.add('featureAwarePlugin', project.configurations.compileClasspath)
      project.dependencies.add('featureAwarePlugin', project(':x-pack:test:feature-aware'))
      project.dependencies.add('featureAwarePlugin', project.sourceSets.main.output.getClassesDirs())

      File successMarker = file("$buildDir/markers/featureAware")
      task featureAwareCheck(type: LoggedExec) {
        description = "Runs FeatureAwareCheck on main classes."
        dependsOn project.configurations.featureAwarePlugin
        outputs.file(successMarker)

        executable = "${project.runtimeJavaHome}/bin/java"

        // default to main class files if such a source set exists
        final List files = []
        if (project.sourceSets.findByName("main")) {
          files.add(project.sourceSets.main.output.classesDirs)
          dependsOn project.tasks.classes
        }
        // filter out non-existent classes directories from empty source sets
        final FileCollection classDirectories = project.files(files).filter { it.exists() }

        doFirst {
          args('-cp', project.configurations.featureAwarePlugin.asPath, 'org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck')
          classDirectories.each { args it.getAbsolutePath() }
        }
        doLast {
          successMarker.parentFile.mkdirs()
          successMarker.setText("", 'UTF-8')
        }
      }

      project.precommit.dependsOn featureAwareCheck
    }
  }
}
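
// The feature-aware check is wired into each plugin's `precommit`, so it runs with the normal
// precommit task graph; a single module can also be checked directly, e.g. (assuming that module
// applies PluginBuildPlugin):
//   ./gradlew :x-pack:plugin:security:featureAwareCheck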
// https://github.com/elastic/x-plugins/issues/724
configurations {
  testArtifacts.extendsFrom testRuntime
}
task testJar(type: Jar) {
  appendix 'test'
  from sourceSets.test.output
  /*
   * Stick the license and notice file in the jar. This isn't strictly
   * needed because we don't publish it, but it makes our super-paranoid
   * tests happy.
   */
  metaInf {
    from(project.licenseFile.parent) {
      include project.licenseFile.name
      rename { 'LICENSE.txt' }
    }
    from(project.noticeFile.parent) {
      include project.noticeFile.name
    }
  }
}
artifacts {
  testArtifacts testJar
}
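
// Other projects consume this jar through the testArtifacts configuration, mirroring the dependency
// this build itself declares on core's test artifacts above; a hypothetical consumer would declare:
//   testCompile project(path: ':x-pack:plugin', configuration: 'testArtifacts')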
// location for keys and certificates
File keystoreDir = file("$buildDir/keystore")
File nodeKey = file("$keystoreDir/testnode.pem")
File nodeCert = file("$keystoreDir/testnode.crt")

// Add the key and certs to the test classpath: the tests expect them there.
// Use cert and key PEM files instead of a JKS keystore for the cluster's trust material so that
// it can run in a FIPS 140 JVM
// TODO: Remove all existing uses of cross project file references when the new approach for referencing static files is available
// https://github.com/elastic/elasticsearch/pull/32201
task copyKeyCerts(type: Copy) {
  from(project(':x-pack:plugin:core').file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/')) {
    include 'testnode.crt', 'testnode.pem'
  }
  into keystoreDir
}
// Add the keystore dir to the test resources: the tests expect the key material on the classpath
sourceSets.test.resources.srcDir(keystoreDir)
processTestResources.dependsOn(copyKeyCerts)
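
// Because keystoreDir is registered as a test resource root, tests can load the PEM material from
// the classpath; a hypothetical test-side sketch (Java, not part of this build file):
//   InputStream key = getClass().getResourceAsStream("/testnode.pem");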
integTest.runner {
  /*
   * We have to disable setting the number of available processors because tests in the same JVM randomize
   * the processor count and would step on each other: the value is set-once in Netty.
   */
  systemProperty 'es.set.netty.runtime.available.processors', 'false'

  // TODO: fix this rest test to not depend on a hardcoded port!
  def blacklist = ['getting_started/10_monitor_cluster_health/*']
  boolean snapshot = Boolean.valueOf(System.getProperty("build.snapshot", "true"))
  if (!snapshot) {
    // These tests attempt to install basic/internal licenses signed against the dev/public.key.
    // Since there is no infrastructure in place (anytime soon) to generate licenses using the production
    // private key, these tests are blacklisted in non-snapshot test runs.
    blacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*'])
  }
  systemProperty 'tests.rest.blacklist', blacklist.join(',')

  dependsOn copyKeyCerts
}
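
// To exercise these REST tests locally, run the standard integration test task for this project;
// everything in tests.rest.blacklist above is skipped for the run:
//   ./gradlew :x-pack:plugin:integTest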
testClusters.integTest {
  distribution = 'DEFAULT' // this is important since we use the reindex module in ML
  setting 'xpack.ml.enabled', 'true'
  setting 'xpack.security.enabled', 'true'
  // Integration tests are supposed to enable/disable exporters before/after each test
  setting 'xpack.monitoring.exporters._local.type', 'local'
  setting 'xpack.monitoring.exporters._local.enabled', 'false'
  setting 'xpack.security.authc.token.enabled', 'true'
  // API keys (elastic/elasticsearch#38291) complement the OAuth2-style token-service with long-lived
  // credentials; the api-key-service is consulted after the token-service in the authentication chain.
  setting 'xpack.security.authc.api_key.enabled', 'true'
  setting 'xpack.security.transport.ssl.enabled', 'true'
  setting 'xpack.security.transport.ssl.key', nodeKey.name
  setting 'xpack.security.transport.ssl.certificate', nodeCert.name
  setting 'xpack.security.transport.ssl.verification_mode', 'certificate'
  setting 'xpack.security.audit.enabled', 'true'
  setting 'xpack.license.self_generated.type', 'trial'
  keystore 'bootstrap.password', 'x-pack-test-password'
  keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode'
  user username: "x_pack_rest_user", password: "x-pack-test-password"
  extraConfigFile nodeKey.name, nodeKey
  extraConfigFile nodeCert.name, nodeCert
}
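
// With this cluster running locally, the test user configured above can exercise the security APIs;
// a minimal sketch (the HTTP port below is an assumption; test clusters pick their own ports):
//   curl -u x_pack_rest_user:x-pack-test-password http://localhost:9200/_cluster/health
//   curl -u x_pack_rest_user:x-pack-test-password -X POST http://localhost:9200/_security/api_key \
//     -H 'Content-Type: application/json' -d '{"name": "my-key"}'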