// OpenSearch/docs/build.gradle

/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.docs-test'
/* List of files that have snippets that will not work until platinum tests can occur ... */
buildRestTests.expectedUnconvertedCandidates = [
'reference/ml/transforms.asciidoc',
'reference/ml/apis/delete-calendar-event.asciidoc',
'reference/ml/apis/get-bucket.asciidoc',
'reference/ml/apis/get-category.asciidoc',
'reference/ml/apis/get-influencer.asciidoc',
'reference/ml/apis/get-job-stats.asciidoc',
'reference/ml/apis/get-overall-buckets.asciidoc',
'reference/ml/apis/get-record.asciidoc',
'reference/ml/apis/get-snapshot.asciidoc',
'reference/ml/apis/post-data.asciidoc',
'reference/ml/apis/revert-snapshot.asciidoc',
'reference/ml/apis/update-snapshot.asciidoc',
]
integTestCluster {
/* Enable regexes in painless so our tests don't complain about example
* snippets that use them. */
setting 'script.painless.regex.enabled', 'true'
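// Helper that registers the named file from src/test/cluster/config as an
// extra config file in the test cluster, under the same relative path.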
Closure configFile = {
extraConfigFile it, "src/test/cluster/config/$it"
}
configFile 'analysis/example_word_list.txt'
configFile 'analysis/hyphenation_patterns.xml'
configFile 'analysis/synonym.txt'
configFile 'analysis/stemmer_override.txt'
configFile 'userdict_ja.txt'
configFile 'userdict_ko.txt'
configFile 'KeywordTokenizer.rbbi'
extraConfigFile 'hunspell/en_US/en_US.aff', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff'
extraConfigFile 'hunspell/en_US/en_US.dic', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic'
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
// TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
systemProperty 'es.scripting.use_java_time', 'false'
systemProperty 'es.scripting.update.ctx_in_params', 'false'
}
// remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed
if (rootProject.ext.compilerJavaVersion.isJava11()) {
integTestRunner {
systemProperty 'tests.rest.blacklist', [
'plugins/ingest-attachment/line_164',
'plugins/ingest-attachment/line_117'
].join(',')
}
}
// Build the cluster with all plugins
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
/* Skip repositories. We just aren't going to be able to test them so it
* doesn't make sense to waste time installing them. */
if (subproj.path.startsWith(':plugins:repository-')) {
return
}
subproj.afterEvaluate { // need to wait until the project has been configured
integTestCluster {
plugin subproj.path
}
}
}
buildRestTests.docs = fileTree(projectDir) {
// No snippets in here!
exclude 'build.gradle'
// That is where the snippets go, not where they come from!
exclude 'build'
// Just syntax examples
exclude 'README.asciidoc'
// Broken code snippet tests
exclude 'reference/rollup/rollup-getting-started.asciidoc'
exclude 'reference/rollup/apis/rollup-job-config.asciidoc'
exclude 'reference/rollup/apis/rollup-index-caps.asciidoc'
exclude 'reference/rollup/apis/put-job.asciidoc'
exclude 'reference/rollup/apis/stop-job.asciidoc'
exclude 'reference/rollup/apis/start-job.asciidoc'
exclude 'reference/rollup/apis/rollup-search.asciidoc'
exclude 'reference/rollup/apis/delete-job.asciidoc'
exclude 'reference/rollup/apis/get-job.asciidoc'
exclude 'reference/rollup/apis/rollup-caps.asciidoc'
}
listSnippets.docs = buildRestTests.docs
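/* Each buildRestTests.setups entry is a YAML REST-test fragment that runs
 * before any doc snippet tagged with the matching TEST[setup:name] marker,
 * seeding the index data the example expects. */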
Closure setupTwitter = { String name, int count ->
buildRestTests.setups[name] = '''
- do:
indices.create:
index: twitter
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
user:
type: keyword
doc_values: true
date:
type: date
likes:
type: long
- do:
bulk:
index: twitter
type: _doc
refresh: true
body: |'''
for (int i = 0; i < count; i++) {
String user, text
if (i == 0) {
user = 'kimchy'
text = 'trying out Elasticsearch'
} else {
user = 'test'
text = "some message with the number $i"
}
buildRestTests.setups[name] += """
{"index":{"_id": "$i"}}
{"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
}
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
setupTwitter('huge_twitter', 1200)
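// A console snippet in the asciidoc opts into one of these setups with a
// trailing marker, along the lines of (illustrative):
//
//   [source,js]
//   ----
//   GET /twitter/_search
//   ----
//   // CONSOLE
//   // TEST[setup:twitter]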
buildRestTests.setups['host'] = '''
# Fetch the http host. We use the host of the master because we know there will always be a master.
- do:
cluster.state: {}
- set: { master_node: master }
- do:
nodes.info:
metric: [ http, transport ]
- is_true: nodes.$master.http.publish_address
- set: {nodes.$master.http.publish_address: host}
- set: {nodes.$master.transport.publish_address: transport_host}
'''
buildRestTests.setups['node'] = '''
# Fetch the node name. We use the name of the master because we know there will always be a master.
- do:
cluster.state: {}
- is_true: master_node
- set: { master_node: node_name }
'''
// Used by scripted metric docs
buildRestTests.setups['ledger'] = '''
- do:
indices.create:
index: ledger
body:
settings:
number_of_shards: 2
number_of_replicas: 1
mappings:
_doc:
properties:
type:
type: keyword
amount:
type: double
- do:
bulk:
index: ledger
type: _doc
refresh: true
body: |
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 200, "type": "sale", "description": "something"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 10, "type": "expense", "decription": "another thing"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 150, "type": "sale", "description": "blah"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "cost of blah"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "advertisement"}'''
// Used by aggregation docs
buildRestTests.setups['sales'] = '''
- do:
indices.create:
index: sales
body:
settings:
number_of_shards: 2
number_of_replicas: 1
mappings:
_doc:
properties:
type:
type: keyword
- do:
bulk:
index: sales
type: _doc
refresh: true
body: |
{"index":{}}
{"date": "2015/01/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "hat"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "t-shirt"}
{"index":{}}
{"date": "2015/01/01 00:00:00", "price": 150, "promoted": true, "rating": 5, "type": "bag"}
{"index":{}}
{"date": "2015/02/01 00:00:00", "price": 50, "promoted": false, "rating": 1, "type": "hat"}
{"index":{}}
{"date": "2015/02/01 00:00:00", "price": 10, "promoted": true, "rating": 4, "type": "t-shirt"}
{"index":{}}
{"date": "2015/03/01 00:00:00", "price": 200, "promoted": true, "rating": 1, "type": "hat"}
{"index":{}}
{"date": "2015/03/01 00:00:00", "price": 175, "promoted": false, "rating": 2, "type": "t-shirt"}'''
// Dummy bank account data used by getting-started.asciidoc
buildRestTests.setups['bank'] = '''
- do:
indices.create:
index: bank
body:
settings:
number_of_shards: 5
number_of_routing_shards: 5
- do:
bulk:
index: bank
type: _doc
refresh: true
body: |
#bank_data#
'''
/* Load the actual accounts only if we're going to use them. This complicates
* dependency checking but that is a small price to pay for not building a
* 400kb string every time we start the build. */
File accountsFile = new File("$projectDir/src/test/resources/accounts.json")
buildRestTests.inputs.file(accountsFile)
buildRestTests.doFirst {
String accounts = accountsFile.getText('UTF-8')
// Indent like a yaml test needs
accounts = accounts.replaceAll('(?m)^', ' ')
buildRestTests.setups['bank'] =
buildRestTests.setups['bank'].replace('#bank_data#', accounts)
}
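// Declaring accountsFile as a task input (above) means edits to accounts.json
// correctly invalidate buildRestTests' up-to-date check.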
// Used by index boost doc
buildRestTests.setups['index_boost'] = '''
- do:
indices.create:
index: index1
- do:
indices.create:
index: index2
- do:
indices.put_alias:
index: index1
name: alias1
'''
// Used by sampler and diversified-sampler aggregation docs
buildRestTests.setups['stackoverflow'] = '''
- do:
indices.create:
index: stackoverflow
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
author:
type: keyword
tags:
type: keyword
- do:
bulk:
index: stackoverflow
type: _doc
refresh: true
body: |'''
// Make Kibana strongly connected to elasticsearch and logstash
// Make Kibana rarer (and therefore higher-ranking) than Javascript
// Make Javascript strongly connected to jquery and angular
// Make "cabana" (a deliberate misspelling of Kibana) strongly connected to elasticsearch but only as a result of a single author
for (int i = 0; i < 150; i++) {
buildRestTests.setups['stackoverflow'] += """
{"index":{}}
{"author": "very_relevant_$i", "tags": ["elasticsearch", "kibana"]}"""
}
for (int i = 0; i < 50; i++) {
buildRestTests.setups['stackoverflow'] += """
{"index":{}}
{"author": "very_relevant_$i", "tags": ["logstash", "kibana"]}"""
}
for (int i = 0; i < 200; i++) {
buildRestTests.setups['stackoverflow'] += """
{"index":{}}
{"author": "partially_relevant_$i", "tags": ["javascript", "jquery"]}"""
}
for (int i = 0; i < 200; i++) {
buildRestTests.setups['stackoverflow'] += """
{"index":{}}
{"author": "partially_relevant_$i", "tags": ["javascript", "angular"]}"""
}
for (int i = 0; i < 50; i++) {
buildRestTests.setups['stackoverflow'] += """
{"index":{}}
{"author": "noisy author", "tags": ["elasticsearch", "cabana"]}"""
}
buildRestTests.setups['stackoverflow'] += """
"""
// Used by significant_text aggregation docs
buildRestTests.setups['news'] = '''
- do:
indices.create:
index: news
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
source:
type: keyword
content:
type: text
- do:
bulk:
index: news
type: _doc
refresh: true
body: |'''
// Make h5n1 strongly connected to bird flu
for (int i = 0; i < 100; i++) {
buildRestTests.setups['news'] += """
{"index":{}}
{"source": "very_relevant_$i", "content": "bird flu h5n1"}"""
}
for (int i = 0; i < 100; i++) {
buildRestTests.setups['news'] += """
{"index":{}}
{"source": "filler_$i", "content": "bird dupFiller "}"""
}
for (int i = 0; i < 100; i++) {
buildRestTests.setups['news'] += """
{"index":{}}
{"source": "filler_$i", "content": "flu dupFiller "}"""
}
for (int i = 0; i < 20; i++) {
buildRestTests.setups['news'] += """
{"index":{}}
{"source": "partially_relevant_$i", "content": "elasticsearch dupFiller dupFiller dupFiller dupFiller pozmantier"}"""
}
for (int i = 0; i < 10; i++) {
buildRestTests.setups['news'] += """
{"index":{}}
{"source": "partially_relevant_$i", "content": "elasticsearch logstash kibana"}"""
}
buildRestTests.setups['news'] += """
"""
// Used by some aggregations
buildRestTests.setups['exams'] = '''
- do:
indices.create:
index: exams
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
grade:
type: byte
- do:
bulk:
index: exams
type: _doc
refresh: true
body: |
{"index":{}}
{"grade": 100, "weight": 2}
{"index":{}}
{"grade": 50, "weight": 3}'''
buildRestTests.setups['stored_example_script'] = '''
# Simple script to load a field. Not really a good example, but a simple one.
- do:
put_script:
id: "my_script"
body: { "script": { "lang": "painless", "source": "doc[params.field].value" } }
- match: { acknowledged: true }
'''
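// Stored scripts for the scripted metric aggregation docs: one script per
// phase (init, map, combine, reduce).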
buildRestTests.setups['stored_scripted_metric_script'] = '''
- do:
put_script:
id: "my_init_script"
body: { "script": { "lang": "painless", "source": "state.transactions = []" } }
- match: { acknowledged: true }
- do:
put_script:
id: "my_map_script"
body: { "script": { "lang": "painless", "source": "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } }
- match: { acknowledged: true }
- do:
put_script:
id: "my_combine_script"
body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in state.transactions) { profit += t; } return profit" } }
- match: { acknowledged: true }
- do:
put_script:
id: "my_reduce_script"
body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in states) { profit += a; } return profit" } }
- match: { acknowledged: true }
'''
// Used by analyze api
buildRestTests.setups['analyze_sample'] = '''
- do:
indices.create:
index: analyze_sample
body:
settings:
number_of_shards: 1
number_of_replicas: 0
analysis:
normalizer:
my_normalizer:
type: custom
filter: [lowercase]
mappings:
_doc:
properties:
obj1.field1:
type: text'''
// Used by percentile/percentile-rank aggregations
buildRestTests.setups['latency'] = '''
- do:
indices.create:
index: latency
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
load_time:
type: long
- do:
bulk:
index: latency
type: _doc
refresh: true
body: |'''
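// Index 100 load_time values; all but every tenth are scaled by 10, giving the
// percentile docs a suitably skewed distribution.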
for (int i = 0; i < 100; i++) {
def value = i
if (i % 10) {
value = i*10
}
buildRestTests.setups['latency'] += """
{"index":{}}
{"load_time": "$value"}"""
}
// Used by iprange agg
buildRestTests.setups['iprange'] = '''
- do:
indices.create:
index: ip_addresses
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
ip:
type: ip
- do:
bulk:
index: ip_addresses
type: _doc
refresh: true
body: |'''
for (int i = 0; i < 255; i++) {
buildRestTests.setups['iprange'] += """
{"index":{}}
{"ip": "10.0.0.$i"}"""
}
for (int i = 0; i < 5; i++) {
buildRestTests.setups['iprange'] += """
{"index":{}}
{"ip": "9.0.0.$i"}"""
buildRestTests.setups['iprange'] += """
{"index":{}}
{"ip": "11.0.0.$i"}"""
buildRestTests.setups['iprange'] += """
{"index":{}}
{"ip": "12.0.0.$i"}"""
}
// Used by SQL because it looks SQL-ish
buildRestTests.setups['library'] = '''
- do:
indices.create:
index: library
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
book:
properties:
name:
type: text
fields:
keyword:
type: keyword
author:
type: text
fields:
keyword:
type: keyword
release_date:
type: date
page_count:
type: short
- do:
bulk:
index: library
type: book
refresh: true
body: |
{"index":{"_id": "Leviathan Wakes"}}
{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561}
{"index":{"_id": "Hyperion"}}
{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482}
{"index":{"_id": "Dune"}}
{"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604}
{"index":{"_id": "Dune Messiah"}}
{"name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331}
{"index":{"_id": "Children of Dune"}}
{"name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408}
{"index":{"_id": "God Emperor of Dune"}}
{"name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454}
{"index":{"_id": "Consider Phlebas"}}
{"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471}
{"index":{"_id": "Pandora's Star"}}
{"name": "Pandora's Star", "author": "Peter F. Hamilton", "release_date": "2004-03-02", "page_count": 768}
{"index":{"_id": "Revelation Space"}}
{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585}
{"index":{"_id": "A Fire Upon the Deep"}}
{"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613}
{"index":{"_id": "Ender's Game"}}
{"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324}
{"index":{"_id": "1984"}}
{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328}
{"index":{"_id": "Fahrenheit 451"}}
{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227}
{"index":{"_id": "Brave New World"}}
{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268}
{"index":{"_id": "Foundation"}}
{"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224}
{"index":{"_id": "The Giver"}}
{"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208}
{"index":{"_id": "Slaughterhouse-Five"}}
{"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275}
{"index":{"_id": "The Hitchhiker's Guide to the Galaxy"}}
{"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180}
{"index":{"_id": "Snow Crash"}}
{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}
{"index":{"_id": "Neuromancer"}}
{"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271}
{"index":{"_id": "The Handmaid's Tale"}}
{"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311}
{"index":{"_id": "Starship Troopers"}}
{"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335}
{"index":{"_id": "The Left Hand of Darkness"}}
{"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304}
{"index":{"_id": "The Moon is a Harsh Mistress"}}
{"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288}
'''
buildRestTests.setups['sensor_rollup_job'] = '''
- do:
indices.create:
index: sensor-1
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
_doc:
properties:
timestamp:
type: date
temperature:
type: long
voltage:
type: float
node:
type: keyword
- do:
xpack.rollup.put_job:
id: "sensor"
body: >
{
"index_pattern": "sensor-*",
"rollup_index": "sensor_rollup",
"cron": "*/30 * * * * ?",
"page_size" :1000,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["node"]
}
},
"metrics": [
{
"field": "temperature",
"metrics": ["min", "max", "sum"]
},
{
"field": "voltage",
"metrics": ["avg"]
}
]
}
'''
buildRestTests.setups['sensor_started_rollup_job'] = '''
- do:
indices.create:
index: sensor-1
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
_doc:
properties:
timestamp:
type: date
temperature:
type: long
voltage:
type: float
node:
type: keyword
- do:
bulk:
index: sensor-1
type: _doc
refresh: true
body: |
{"index":{}}
{"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"}
{"index":{}}
{"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"}
{"index":{}}
{"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"}
{"index":{}}
{"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"}
{"index":{}}
{"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"}
{"index":{}}
{"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"}
- do:
xpack.rollup.put_job:
id: "sensor"
body: >
{
"index_pattern": "sensor-*",
"rollup_index": "sensor_rollup",
"cron": "* * * * * ?",
"page_size" :1000,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["node"]
}
},
"metrics": [
{
"field": "temperature",
"metrics": ["min", "max", "sum"]
},
{
"field": "voltage",
"metrics": ["avg"]
}
]
}
- do:
xpack.rollup.start_job:
id: "sensor"
'''
buildRestTests.setups['sensor_index'] = '''
- do:
indices.create:
index: sensor-1
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
_doc:
properties:
timestamp:
type: date
temperature:
type: long
voltage:
type: float
node:
type: keyword
load:
type: double
net_in:
type: long
net_out:
type: long
hostname:
type: keyword
datacenter:
type: keyword
'''
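// Pre-built rollup output matching what the "sensor" job above would produce,
// so the docs can query sensor_rollup without actually running the job.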
buildRestTests.setups['sensor_prefab_data'] = '''
- do:
indices.create:
index: sensor-1
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
_doc:
properties:
timestamp:
type: date
temperature:
type: long
voltage:
type: float
node:
type: keyword
- do:
indices.create:
index: sensor_rollup
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
_doc:
properties:
node.terms.value:
type: keyword
temperature.sum.value:
type: double
temperature.max.value:
type: double
temperature.min.value:
type: double
timestamp.date_histogram.time_zone:
type: keyword
timestamp.date_histogram.interval:
type: keyword
timestamp.date_histogram.timestamp:
type: date
timestamp.date_histogram._count:
type: long
voltage.avg.value:
type: double
voltage.avg._count:
type: long
_rollup.id:
type: keyword
_rollup.version:
type: long
_meta:
_rollup:
sensor:
cron: "* * * * * ?"
rollup_index: "sensor_rollup"
index_pattern: "sensor-*"
timeout: "20s"
page_size: 1000
groups:
date_histogram:
delay: "7d"
field: "timestamp"
interval: "1h"
time_zone: "UTC"
terms:
fields:
- "node"
id: sensor
metrics:
- field: "temperature"
metrics:
- min
- max
- sum
- field: "voltage"
metrics:
- avg
- do:
bulk:
index: sensor_rollup
type: _doc
refresh: true
body: |
{"index":{}}
{"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
{"index":{}}
{"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
{"index":{}}
{"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
{"index":{}}
{"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
{"index":{}}
{"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
{"index":{}}
{"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"}
'''
buildRestTests.setups['sample_job'] = '''
- do:
xpack.ml.put_job:
job_id: "sample_job"
body: >
{
"description" : "Very basic job",
"analysis_config" : {
"bucket_span":"10m",
"detectors" :[
{
"function": "count"
}
]},
"data_description" : {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
'''
buildRestTests.setups['farequote_index'] = '''
- do:
indices.create:
index: farequote
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
metric:
properties:
time:
type: date
responsetime:
type: float
airline:
type: keyword
doc_count:
type: integer
'''
buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index'] + '''
- do:
bulk:
index: farequote
type: metric
refresh: true
body: |
{"index": {"_id":"1"}}
{"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5}
{"index": {"_id":"2"}}
{"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23}
{"index": {"_id":"3"}}
{"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42}
'''
buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + '''
- do:
xpack.ml.put_job:
job_id: "farequote"
body: >
{
"analysis_config": {
"bucket_span": "60m",
"detectors": [{
"function": "mean",
"field_name": "responsetime",
"by_field_name": "airline"
}],
"summary_count_field_name": "doc_count"
},
"data_description": {
"time_field": "time"
}
}
'''
buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + '''
- do:
xpack.ml.put_datafeed:
datafeed_id: "datafeed-farequote"
body: >
{
"job_id":"farequote",
"indexes":"farequote"
}
'''
buildRestTests.setups['server_metrics_index'] = '''
- do:
indices.create:
index: server-metrics
body:
settings:
number_of_shards: 1
number_of_replicas: 0
mappings:
metric:
properties:
timestamp:
type: date
total:
type: long
'''
buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_metrics_index'] + '''
- do:
bulk:
index: server-metrics
type: metric
refresh: true
body: |
{"index": {"_id":"1177"}}
{"timestamp":"2017-03-23T13:00:00","total":40476}
{"index": {"_id":"1178"}}
{"timestamp":"2017-03-23T13:00:00","total":15287}
{"index": {"_id":"1179"}}
{"timestamp":"2017-03-23T13:00:00","total":-776}
{"index": {"_id":"1180"}}
{"timestamp":"2017-03-23T13:00:00","total":11366}
{"index": {"_id":"1181"}}
{"timestamp":"2017-03-23T13:00:00","total":3606}
{"index": {"_id":"1182"}}
{"timestamp":"2017-03-23T13:00:00","total":19006}
{"index": {"_id":"1183"}}
{"timestamp":"2017-03-23T13:00:00","total":38613}
{"index": {"_id":"1184"}}
{"timestamp":"2017-03-23T13:00:00","total":19516}
{"index": {"_id":"1185"}}
{"timestamp":"2017-03-23T13:00:00","total":-258}
{"index": {"_id":"1186"}}
{"timestamp":"2017-03-23T13:00:00","total":9551}
{"index": {"_id":"1187"}}
{"timestamp":"2017-03-23T13:00:00","total":11217}
{"index": {"_id":"1188"}}
{"timestamp":"2017-03-23T13:00:00","total":22557}
{"index": {"_id":"1189"}}
{"timestamp":"2017-03-23T13:00:00","total":40508}
{"index": {"_id":"1190"}}
{"timestamp":"2017-03-23T13:00:00","total":11887}
{"index": {"_id":"1191"}}
{"timestamp":"2017-03-23T13:00:00","total":31659}
'''
buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + '''
- do:
xpack.ml.put_job:
job_id: "total-requests"
body: >
{
"description" : "Total sum of requests",
"analysis_config" : {
"bucket_span":"10m",
"detectors" :[
{
"detector_description": "Sum of total",
"function": "sum",
"field_name": "total"
}
]},
"data_description" : {
"time_field":"timestamp",
"time_format": "epoch_ms"
}
}
'''
buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + '''
- do:
xpack.ml.put_datafeed:
datafeed_id: "datafeed-total-requests"
body: >
{
"job_id":"total-requests",
"indexes":"server-metrics"
}
'''
buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + '''
- do:
xpack.ml.open_job:
job_id: "total-requests"
'''
buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.start_datafeed:
datafeed_id: "datafeed-total-requests"
'''
buildRestTests.setups['calendar_outages'] = '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
'''
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + '''
- do:
xpack.ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" }
'''
buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
'''
buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
body: >
{
"job_ids": ["total-requests"]
}
'''
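// Note: this redefines calendar_outages_addevent from above; this later
// version, built on calendar_outages_addjob, is the one that takes effect.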
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + '''
- do:
xpack.ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "events" : [
{ "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"},
{ "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"},
{ "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"}
]}
'''