This adds a new Rollup module to X-Pack, which allows users to configure periodic "rollup jobs" to pre-aggregate data. That data is then available later for search through a special RollupSearch API, which mimics the DSL and functionality of regular search. Rollups are used to drastically reduce the on-disk footprint of metric-based data (e.g. timestamped documents with numeric and keyword fields). They can also be used to speed up aggregations over large datasets, since the rolled-up data is considerably smaller and there are fewer documents to search. The PR adds seven new endpoints to interact with rollups: create/get/delete job, start/stop job, a capabilities API similar to field-caps, and a rollup-enabled search. Original commit: elastic/x-pack-elasticsearch@dcde91aacf
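For orientation, here is a minimal, hedged sketch of what creating one of those rollup jobs could look like from a Groovy client. The endpoint path (_xpack/rollup/job/<id>), the job name, and the config fields are assumptions based on the description above rather than anything defined in this repository:

    // Hedged sketch only: the path and config fields below are assumed, not taken from this repo.
    String body = '''{
      "index_pattern": "metrics-*",
      "rollup_index": "metrics_rollup",
      "cron": "*/30 * * * * ?",
      "page_size": 1000,
      "groups": {
        "date_histogram": { "field": "timestamp", "interval": "1h" }
      },
      "metrics": [ { "field": "cpu", "metrics": ["min", "max", "avg"] } ]
    }'''

    // PUT the job config to a locally running node
    HttpURLConnection conn = (HttpURLConnection) new URL('http://localhost:9200/_xpack/rollup/job/sensor_metrics').openConnection()
    conn.setRequestMethod('PUT')
    conn.setDoOutput(true)
    conn.setRequestProperty('Content-Type', 'application/json')
    conn.outputStream.withCloseable { it.write(body.getBytes('UTF-8')) }
    println "create rollup job -> HTTP ${conn.responseCode}"
    conn.disconnect()

The rollup-enabled search mentioned in the same description would then be issued against the rollup index with the regular search DSL.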
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.NodeInfo
import org.elasticsearch.gradle.test.RunTask

import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption

apply plugin: 'elasticsearch.es-meta-plugin'

archivesBaseName = 'x-pack'

es_meta_plugin {
  name = 'x-pack'
  description = 'Elasticsearch Expanded Pack Plugin'
  plugins = ['core', 'deprecation', 'graph', 'logstash',
             'ml', 'monitoring', 'security', 'upgrade', 'watcher', 'sql', 'rollup']
}

dependencies {
  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
}

// https://github.com/elastic/x-plugins/issues/724
configurations {
  testArtifacts.extendsFrom testRuntime
}

task testJar(type: Jar) {
  appendix 'test'
  from sourceSets.test.output
}

artifacts {
  testArtifacts testJar
}

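// For reference (not part of the upstream script): another module can consume the test jar exposed
// above via the 'testArtifacts' configuration, mirroring the dependencies block earlier in this file.
// The project path below is illustrative:
//
//   dependencies {
//     testCompile project(path: ':x-pack', configuration: 'testArtifacts')
//   }
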
integTestRunner {
  /*
   * We have to disable setting the number of available processors as tests in the same JVM randomize
   * processors and will step on each other if we allow them to set the number of available processors,
   * as it's set-once in Netty.
   */
  systemProperty 'es.set.netty.runtime.available.processors', 'false'

  // TODO: fix this rest test to not depend on a hardcoded port!
  def blacklist = ['getting_started/10_monitor_cluster_health/*']
  boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true"))
  if (!snapshot) {
    // These tests attempt to install basic/internal licenses signed against the dev/public.key.
    // Since there is no infrastructure in place (anytime soon) to generate licenses using the production
    // private key, these tests are blacklisted in non-snapshot test runs.
    blacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*'])
  }
  systemProperty 'tests.rest.blacklist', blacklist.join(',')
}

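// Usage note (illustrative, not part of the upstream script): build.snapshot defaults to "true", so the
// license tests above are only added to the blacklist when the property is explicitly flipped for a
// release-style run, for example:
//
//   ./gradlew integTest -Dbuild.snapshot=false
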
// location of generated keystores and certificates
File keystoreDir = new File(project.buildDir, 'keystore')

// Generate the node's keystore
File nodeKeystore = new File(keystoreDir, 'test-node.jks')
task createNodeKeyStore(type: LoggedExec) {
  doFirst {
    if (nodeKeystore.parentFile.exists() == false) {
      nodeKeystore.parentFile.mkdirs()
    }
    if (nodeKeystore.exists()) {
      delete nodeKeystore
    }
  }
  executable = new File(project.runtimeJavaHome, 'bin/keytool')
  standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
  args '-genkey',
      '-alias', 'test-node',
      '-keystore', nodeKeystore,
      '-keyalg', 'RSA',
      '-keysize', '2048',
      '-validity', '712',
      '-dname', 'CN=smoke-test-plugins-ssl',
      '-keypass', 'keypass',
      '-storepass', 'keypass'
}

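// Optional sanity check (illustrative, not part of the upstream script): the generated keystore can be
// inspected with the same JDK keytool binary, for example via a task like:
//
//   task listNodeKeyStore(type: LoggedExec) {
//     dependsOn createNodeKeyStore
//     executable = new File(project.runtimeJavaHome, 'bin/keytool')
//     args '-list', '-keystore', nodeKeystore, '-storepass', 'keypass'
//   }
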
// Add keystores to test classpath: it expects it there
sourceSets.test.resources.srcDir(keystoreDir)
processTestResources.dependsOn(createNodeKeyStore)

integTestCluster {
  dependsOn createNodeKeyStore
  setting 'xpack.ml.enabled', 'true'
  setting 'logger.org.elasticsearch.xpack.ml.datafeed', 'TRACE'
  // Integration tests are supposed to enable/disable exporters before/after each test
  setting 'xpack.monitoring.exporters._local.type', 'local'
  setting 'xpack.monitoring.exporters._local.enabled', 'false'
  setting 'xpack.security.authc.token.enabled', 'true'
  setting 'xpack.security.transport.ssl.enabled', 'true'
  setting 'xpack.security.transport.ssl.keystore.path', nodeKeystore.name
  setting 'xpack.security.transport.ssl.verification_mode', 'certificate'
  setting 'xpack.security.audit.enabled', 'true'
  setting 'xpack.license.self_generated.type', 'trial'
  keystoreSetting 'bootstrap.password', 'x-pack-test-password'
  keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass'
  distribution = 'zip' // this is important since we use the reindex module in ML

  setupCommand 'setupTestUser', 'bin/x-pack/users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser'

  extraConfigFile nodeKeystore.name, nodeKeystore

  waitCondition = { NodeInfo node, AntBuilder ant ->
    File tmpFile = new File(node.cwd, 'wait.success')

    for (int i = 0; i < 10; i++) {
      // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned
      HttpURLConnection httpURLConnection = null
      try {
        httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection()
        httpURLConnection.setRequestProperty("Authorization", "Basic " +
            Base64.getEncoder().encodeToString("x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)))
        httpURLConnection.setRequestMethod("GET")
        httpURLConnection.connect()
        if (httpURLConnection.getResponseCode() == 200) {
          tmpFile.withWriter StandardCharsets.UTF_8.name(), {
            it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name()))
          }
          // the cluster responded and the success marker has been written, so stop polling
          break
        }
      } catch (Exception e) {
        if (i == 9) {
          logger.error("final attempt of calling cluster health failed", e)
        } else {
          logger.debug("failed to call cluster health", e)
        }
      } finally {
        if (httpURLConnection != null) {
          httpURLConnection.disconnect()
        }
      }

      // did not start yet, so wait a bit before trying again
      Thread.sleep(500L)
    }
    return tmpFile.exists()
  }
}

run {
  def licenseType = System.getProperty("license_type", "basic")
  if (licenseType == 'trial') {
    setting 'xpack.ml.enabled', 'true'
    setting 'xpack.graph.enabled', 'true'
    setting 'xpack.watcher.enabled', 'true'
    setting 'xpack.license.self_generated.type', 'trial'
  } else if (licenseType != 'basic') {
    throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "]. Must be " +
        "[basic] or [trial].")
  }
  setting 'xpack.security.enabled', 'true'
  setting 'xpack.monitoring.enabled', 'true'
  setting 'xpack.sql.enabled', 'true'
  setting 'xpack.rollup.enabled', 'true'
  keystoreSetting 'bootstrap.password', 'password'
}
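
// Usage note (illustrative, not part of the upstream script): license_type is read as a JVM system
// property of the Gradle invocation, so a trial-licensed dev node can be started with something like:
//
//   ./gradlew run -Dlicense_type=trial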