2016-04-29 10:42:03 -04:00
|
|
|
/*
|
|
|
|
* Licensed to Elasticsearch under one or more contributor
|
|
|
|
* license agreements. See the NOTICE file distributed with
|
|
|
|
* this work for additional information regarding copyright
|
|
|
|
* ownership. Elasticsearch licenses this file to you under
|
|
|
|
* the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
* not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing,
|
|
|
|
* software distributed under the License is distributed on an
|
|
|
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
|
|
* KIND, either express or implied. See the License for the
|
|
|
|
* specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
|
// Wire this project into the docs-snippet test harness: it registers the
// buildRestTests task used throughout the rest of this file.
apply plugin: 'elasticsearch.docs-test'
/* List of files that have snippets that probably should be converted to
 * `// CONSOLE` and `// TESTRESPONSE` but have yet to be converted. Try and
 * only remove entries from this list. When it is empty we'll remove it
 * entirely and have a party! There will be cake and everything.... */
buildRestTests.expectedUnconvertedCandidates = [
  'reference/aggregations/bucket/datehistogram-aggregation.asciidoc',
  'reference/aggregations/bucket/daterange-aggregation.asciidoc',
  'reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc',
  'reference/aggregations/bucket/filter-aggregation.asciidoc',
  'reference/aggregations/bucket/geodistance-aggregation.asciidoc',
  'reference/aggregations/bucket/geohashgrid-aggregation.asciidoc',
  'reference/aggregations/bucket/global-aggregation.asciidoc',
  'reference/aggregations/bucket/histogram-aggregation.asciidoc',
  'reference/aggregations/bucket/iprange-aggregation.asciidoc',
  'reference/aggregations/bucket/missing-aggregation.asciidoc',
  'reference/aggregations/bucket/nested-aggregation.asciidoc',
  'reference/aggregations/bucket/range-aggregation.asciidoc',
  'reference/aggregations/bucket/reverse-nested-aggregation.asciidoc',
  'reference/aggregations/bucket/sampler-aggregation.asciidoc',
  'reference/aggregations/bucket/significantterms-aggregation.asciidoc',
  'reference/aggregations/bucket/terms-aggregation.asciidoc',
  'reference/aggregations/matrix/stats-aggregation.asciidoc',
  'reference/aggregations/metrics/avg-aggregation.asciidoc',
  'reference/aggregations/metrics/cardinality-aggregation.asciidoc',
  'reference/aggregations/metrics/extendedstats-aggregation.asciidoc',
  'reference/aggregations/metrics/geobounds-aggregation.asciidoc',
  'reference/aggregations/metrics/geocentroid-aggregation.asciidoc',
  'reference/aggregations/metrics/max-aggregation.asciidoc',
  'reference/aggregations/metrics/min-aggregation.asciidoc',
  'reference/aggregations/metrics/percentile-aggregation.asciidoc',
  'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc',
  'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc',
  'reference/aggregations/metrics/stats-aggregation.asciidoc',
  'reference/aggregations/metrics/sum-aggregation.asciidoc',
  'reference/aggregations/metrics/tophits-aggregation.asciidoc',
  'reference/aggregations/metrics/valuecount-aggregation.asciidoc',
  'reference/aggregations/pipeline.asciidoc',
  'reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/bucket-script-aggregation.asciidoc',
  'reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc',
  'reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc',
  'reference/aggregations/pipeline/derivative-aggregation.asciidoc',
  'reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/max-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/min-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/movavg-aggregation.asciidoc',
  'reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/serial-diff-aggregation.asciidoc',
  'reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc',
  'reference/analysis/analyzers/lang-analyzer.asciidoc',
  'reference/analysis/analyzers/pattern-analyzer.asciidoc',
  'reference/analysis/charfilters/htmlstrip-charfilter.asciidoc',
  'reference/analysis/charfilters/pattern-replace-charfilter.asciidoc',
  'reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/elision-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc',
  'reference/cat/recovery.asciidoc',
  'reference/cat/shards.asciidoc',
  'reference/cat/snapshots.asciidoc',
  'reference/cat/templates.asciidoc',
  'reference/cat/thread_pool.asciidoc',
  'reference/cluster/allocation-explain.asciidoc',
  'reference/cluster/nodes-info.asciidoc',
  'reference/cluster/nodes-stats.asciidoc',
  'reference/cluster/pending.asciidoc',
  'reference/cluster/reroute.asciidoc',
  'reference/cluster/state.asciidoc',
  'reference/cluster/stats.asciidoc',
  'reference/cluster/tasks.asciidoc',
  'reference/cluster/update-settings.asciidoc',
  'reference/docs/bulk.asciidoc',
  'reference/docs/delete-by-query.asciidoc',
  'reference/docs/delete.asciidoc',
  'reference/docs/index_.asciidoc',
  'reference/docs/multi-get.asciidoc',
  'reference/docs/multi-termvectors.asciidoc',
  'reference/docs/reindex.asciidoc',
  'reference/docs/termvectors.asciidoc',
  'reference/docs/update-by-query.asciidoc',
  'reference/docs/update.asciidoc',
  'reference/index-modules/similarity.asciidoc',
  'reference/index-modules/store.asciidoc',
  'reference/index-modules/translog.asciidoc',
  'reference/indices/analyze.asciidoc',
  'reference/indices/flush.asciidoc',
  'reference/indices/get-field-mapping.asciidoc',
  'reference/indices/get-settings.asciidoc',
  'reference/indices/put-mapping.asciidoc',
  'reference/indices/recovery.asciidoc',
  'reference/indices/segments.asciidoc',
  'reference/indices/shadow-replicas.asciidoc',
  'reference/indices/shard-stores.asciidoc',
  'reference/indices/update-settings.asciidoc',
  'reference/ingest/ingest-node.asciidoc',
  'reference/mapping/dynamic/templates.asciidoc',
  'reference/mapping/fields/all-field.asciidoc',
  'reference/mapping/params/analyzer.asciidoc',
  'reference/mapping/types/binary.asciidoc',
  'reference/mapping/types/geo-point.asciidoc',
  'reference/mapping/types/geo-shape.asciidoc',
  'reference/mapping/types/ip.asciidoc',
  'reference/mapping/types/nested.asciidoc',
  'reference/mapping/types/object.asciidoc',
  'reference/mapping/types/percolator.asciidoc',
  'reference/modules/scripting/native.asciidoc',
  'reference/modules/scripting/security.asciidoc',
  'reference/modules/scripting/using.asciidoc',
  'reference/modules/transport.asciidoc',
  'reference/modules/cross-cluster-search.asciidoc', // this is hard to test since we need 2 clusters -- maybe we can trick it into referencing itself...
  'reference/query-dsl/exists-query.asciidoc',
  'reference/query-dsl/function-score-query.asciidoc',
  'reference/query-dsl/geo-shape-query.asciidoc',
  'reference/query-dsl/terms-query.asciidoc',
  'reference/search/field-stats.asciidoc',
  'reference/search/multi-search.asciidoc',
  'reference/search/profile.asciidoc',
  'reference/search/request/highlighting.asciidoc',
  'reference/search/request/inner-hits.asciidoc',
  'reference/search/request/rescore.asciidoc',
  'reference/search/search-template.asciidoc',
]
// Integration-test cluster configuration for running the docs snippets.
integTest {
  cluster {
    // Allow inline and stored scripts: many doc snippets exercise scripting.
    setting 'script.inline', 'true'
    setting 'script.stored', 'true'
    // Raise the script-compilation circuit breaker (default 15/min) far above
    // anything the docs tests can hit, so snippet-heavy runs don't trip it.
    setting 'script.max_compilations_per_minute', '1000'
    /* Enable regexes in painless so our tests don't complain about example
     * snippets that use them. */
    setting 'script.painless.regex.enabled', 'true'
    // Helper: copy a file from src/test/cluster/config into the cluster's
    // config directory under the same relative path.
    Closure configFile = {
      extraConfigFile it, "src/test/cluster/config/$it"
    }
    configFile 'scripts/my_script.js'
    configFile 'scripts/my_script.py'
    configFile 'scripts/my_init_script.painless'
    configFile 'scripts/my_map_script.painless'
    configFile 'scripts/my_combine_script.painless'
    configFile 'scripts/my_reduce_script.painless'
    configFile 'userdict_ja.txt'
    configFile 'KeywordTokenizer.rbbi'
    // Whitelist reindexing from the local node so we can test it.
    setting 'reindex.remote.whitelist', '127.0.0.1:*'
  }
}
// Build the cluster with all plugins installed so plugin docs can be tested.
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
  /* Skip repositories. We just aren't going to be able to test them so it
   * doesn't make sense to waste time installing them. */
  if (subproj.path.startsWith(':plugins:repository-')) {
    return
  }
  subproj.afterEvaluate { // need to wait until the project has been configured
    integTest {
      cluster {
        plugin subproj.path
      }
    }
  }
}
// The set of asciidoc files whose snippets are turned into REST tests.
buildRestTests.docs = fileTree(projectDir) {
  // No snippets in here!
  exclude 'build.gradle'
  // That is where the snippets go, not where they come from!
  exclude 'build'
  // This file simply doesn't pass yet. We should figure out how to fix it.
  exclude 'reference/modules/snapshots.asciidoc'
}
// Register a named REST-test setup that creates a single-shard `twitter`
// index and bulk-indexes `count` tweets. The first tweet is always kimchy's
// canonical example; the rest are numbered filler documents.
Closure setupTwitter = { String name, int count ->
  buildRestTests.setups[name] = '''
  - do:
      indices.create:
        index: twitter
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
  - do:
      bulk:
        index: twitter
        type: tweet
        refresh: true
        body: |'''
  for (int i = 0; i < count; i++) {
    String user, text
    if (i == 0) {
      user = 'kimchy'
      text = 'trying out Elasticsearch'
    } else {
      user = 'test'
      text = "some message with the number $i"
    }
    // Append one bulk action/document pair per tweet, indented to match the
    // yaml block scalar opened by `body: |` above.
    buildRestTests.setups[name] += """
          {"index":{"_id": "$i"}}
          {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
  }
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
setupTwitter('huge_twitter', 1200)
// Setup that captures the http publish address of the master node into the
// `$host` stash variable for snippets that need a concrete host:port.
buildRestTests.setups['host'] = '''
  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
'''
// Used by scripted metric docs
// NOTE(review): the second bulk document spells the field "decription" and
// the mapping declares type `sale` while the bulk uses `type: item` — both
// look like latent typos, but the strings are runtime fixture data the doc
// snippets may depend on, so they are preserved verbatim; confirm before fixing.
buildRestTests.setups['ledger'] = '''
  - do:
      indices.create:
        index: ledger
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
                amount:
                  type: double
  - do:
      bulk:
        index: ledger
        type: item
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 200, "type": "sale", "description": "something"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 10, "type": "expense", "decription": "another thing"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 150, "type": "sale", "description": "blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "cost of blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "advertisement"}'''
// Used by pipeline aggregation docs
buildRestTests.setups['sales'] = '''
  - do:
      indices.create:
        index: sales
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
  - do:
      bulk:
        index: sales
        type: sale
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "type": "hat"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 150, "type": "bag"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 50, "type": "hat"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 10, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 200, "type": "hat"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 175, "type": "t-shirt"}'''
// Dummy bank account data used by getting-started.asciidoc
// The #bank_data# placeholder is substituted lazily in the doFirst below.
buildRestTests.setups['bank'] = '''
  - do:
      bulk:
        index: bank
        type: account
        refresh: true
        body: |
          #bank_data#
'''
/* Load the actual accounts only if we're going to use them. This complicates
 * dependency checking but that is a small price to pay for not building a
 * 400kb string every time we start the build. */
File accountsFile = new File("$projectDir/src/test/resources/accounts.json")
buildRestTests.inputs.file(accountsFile)
buildRestTests.doFirst {
  String accounts = accountsFile.getText('UTF-8')
  // Indent like a yaml test needs
  accounts = accounts.replaceAll('(?m)^', '          ')
  buildRestTests.setups['bank'] =
    buildRestTests.setups['bank'].replace('#bank_data#', accounts)
}
// Index with integer_range and date_range fields, used by the range-field
// mapping and range-query docs.
buildRestTests.setups['range_index'] = '''
  - do :
      indices.create:
        index: range_index
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            my_type:
              properties:
                expected_attendees:
                  type: integer_range
                time_frame:
                  type: date_range
                  format: yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis
  - do:
      bulk:
        index: range_index
        type: my_type
        refresh: true
        body: |
          {"index":{"_id": 1}}
          {"expected_attendees": {"gte": 10, "lte": 20}, "time_frame": {"gte": "2015-10-31 12:00:00", "lte": "2015-11-01"}}'''
// Used by index boost doc
buildRestTests.setups['index_boost'] = '''
  - do:
      indices.create:
        index: index1
  - do:
      indices.create:
        index: index2

  - do:
      indices.put_alias:
        index: index1
        name: alias1
'''