OpenSearch/docs/build.gradle
Jim Ferenczi e48bc2eed7 Add field collapsing for search request (#22337)
* Add top hits collapsing to search request

The field collapsing is done with a custom top docs collector that "collapses" search hits with the same field value.
The distributed aspect is resolved using the same two passes that a regular search uses: the first pass "collapses" the top hits on each shard, then the coordinating node merges/collapses the top hits from all shards.

```
GET _search
{
   "collapse": {
      "field": "category",
   }
}
```

This change also adds an ExpandCollapseSearchResponseListener that intercepts the search response and expands collapsed hits using the CollapseBuilder#innerHit options.
The inner_hits for each collapsed hit are retrieved by sending an additional query to all shards, filtered by the collapse key.

```
GET _search
{
   "collapse": {
      "field": "category",
      "inner_hits": {
	"size": 2
      }
   }
}
```
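
For illustration, each collapsed hit in the response carries its collapse field value under `fields`, and, when `inner_hits` is requested, the expanded group as well (the index and values below are hypothetical):

```
{
   "_index": "products",
   "_id": "0",
   "_score": 1.2,
   "_source": { ... },
   "fields": {
      "category": ["kitchen"]
   },
   "inner_hits": { ... }
}
```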
2017-01-23 16:33:51 +01:00

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
apply plugin: 'elasticsearch.docs-test'
/* List of files that have snippets that probably should be converted to
 * `// CONSOLE` and `// TESTRESPONSE` but have yet to be converted (see the
 * example after this list). Try and only remove entries from this list.
 * When it is empty we'll remove it entirely and have a party! There will
 * be cake and everything.... */
buildRestTests.expectedUnconvertedCandidates = [
  'reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc',
  'reference/aggregations/bucket/geodistance-aggregation.asciidoc',
  'reference/aggregations/bucket/geohashgrid-aggregation.asciidoc',
  'reference/aggregations/bucket/histogram-aggregation.asciidoc',
  'reference/aggregations/bucket/iprange-aggregation.asciidoc',
  'reference/aggregations/bucket/missing-aggregation.asciidoc',
  'reference/aggregations/bucket/nested-aggregation.asciidoc',
  'reference/aggregations/bucket/range-aggregation.asciidoc',
  'reference/aggregations/bucket/reverse-nested-aggregation.asciidoc',
  'reference/aggregations/bucket/sampler-aggregation.asciidoc',
  'reference/aggregations/bucket/significantterms-aggregation.asciidoc',
  'reference/aggregations/bucket/terms-aggregation.asciidoc',
  'reference/aggregations/matrix/stats-aggregation.asciidoc',
  'reference/aggregations/metrics/avg-aggregation.asciidoc',
  'reference/aggregations/metrics/cardinality-aggregation.asciidoc',
  'reference/aggregations/metrics/extendedstats-aggregation.asciidoc',
  'reference/aggregations/metrics/geobounds-aggregation.asciidoc',
  'reference/aggregations/metrics/geocentroid-aggregation.asciidoc',
  'reference/aggregations/metrics/percentile-aggregation.asciidoc',
  'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc',
  'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc',
  'reference/aggregations/metrics/stats-aggregation.asciidoc',
  'reference/aggregations/metrics/sum-aggregation.asciidoc',
  'reference/aggregations/metrics/tophits-aggregation.asciidoc',
  'reference/aggregations/pipeline.asciidoc',
  'reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/bucket-script-aggregation.asciidoc',
  'reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc',
  'reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc',
  'reference/aggregations/pipeline/derivative-aggregation.asciidoc',
  'reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/max-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/min-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/movavg-aggregation.asciidoc',
  'reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/serial-diff-aggregation.asciidoc',
  'reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc',
  'reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc',
  'reference/analysis/analyzers/lang-analyzer.asciidoc',
  'reference/analysis/analyzers/pattern-analyzer.asciidoc',
  'reference/analysis/charfilters/htmlstrip-charfilter.asciidoc',
  'reference/analysis/charfilters/pattern-replace-charfilter.asciidoc',
  'reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/elision-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc',
  'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc',
  'reference/cat/recovery.asciidoc',
  'reference/cat/shards.asciidoc',
  'reference/cat/snapshots.asciidoc',
  'reference/cat/templates.asciidoc',
  'reference/cat/thread_pool.asciidoc',
  'reference/cluster/allocation-explain.asciidoc',
  'reference/cluster/nodes-info.asciidoc',
  'reference/cluster/nodes-stats.asciidoc',
  'reference/cluster/pending.asciidoc',
  'reference/cluster/reroute.asciidoc',
  'reference/cluster/state.asciidoc',
  'reference/cluster/stats.asciidoc',
  'reference/cluster/tasks.asciidoc',
  'reference/cluster/update-settings.asciidoc',
  'reference/docs/bulk.asciidoc',
  'reference/docs/delete-by-query.asciidoc',
  'reference/docs/delete.asciidoc',
  'reference/docs/index_.asciidoc',
  'reference/docs/multi-get.asciidoc',
  'reference/docs/multi-termvectors.asciidoc',
  'reference/docs/reindex.asciidoc',
  'reference/docs/termvectors.asciidoc',
  'reference/docs/update-by-query.asciidoc',
  'reference/docs/update.asciidoc',
  'reference/index-modules/similarity.asciidoc',
  'reference/index-modules/store.asciidoc',
  'reference/index-modules/translog.asciidoc',
  'reference/indices/analyze.asciidoc',
  'reference/indices/flush.asciidoc',
  'reference/indices/get-field-mapping.asciidoc',
  'reference/indices/get-settings.asciidoc',
  'reference/indices/put-mapping.asciidoc',
  'reference/indices/recovery.asciidoc',
  'reference/indices/segments.asciidoc',
  'reference/indices/shadow-replicas.asciidoc',
  'reference/indices/shard-stores.asciidoc',
  'reference/indices/update-settings.asciidoc',
  'reference/ingest/ingest-node.asciidoc',
  'reference/mapping/dynamic/templates.asciidoc',
  'reference/mapping/fields/all-field.asciidoc',
  'reference/mapping/params/analyzer.asciidoc',
  'reference/mapping/types/binary.asciidoc',
  'reference/mapping/types/geo-point.asciidoc',
  'reference/mapping/types/geo-shape.asciidoc',
  'reference/mapping/types/ip.asciidoc',
  'reference/mapping/types/nested.asciidoc',
  'reference/mapping/types/object.asciidoc',
  'reference/mapping/types/percolator.asciidoc',
  'reference/modules/scripting/native.asciidoc',
  'reference/modules/scripting/security.asciidoc',
  'reference/modules/scripting/using.asciidoc',
  'reference/modules/transport.asciidoc',
  'reference/modules/cross-cluster-search.asciidoc', // this is hard to test since we need 2 clusters -- maybe we can trick it into referencing itself...
  'reference/query-dsl/exists-query.asciidoc',
  'reference/query-dsl/function-score-query.asciidoc',
  'reference/query-dsl/geo-shape-query.asciidoc',
  'reference/query-dsl/terms-query.asciidoc',
  'reference/search/field-stats.asciidoc',
  'reference/search/multi-search.asciidoc',
  'reference/search/profile.asciidoc',
  'reference/search/request/highlighting.asciidoc',
  'reference/search/request/inner-hits.asciidoc',
  'reference/search/request/rescore.asciidoc',
  'reference/search/search-template.asciidoc',
]
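/* For reference, a converted snippet in one of these asciidoc files looks
 * roughly like this (hypothetical request); the `// CONSOLE` marker is what
 * turns the snippet into an executable test:
 *
 *   [source,js]
 *   --------------------------------------------------
 *   GET /_search
 *   --------------------------------------------------
 *   // CONSOLE
 */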
integTest {
  cluster {
    setting 'script.inline', 'true'
    setting 'script.stored', 'true'
    setting 'script.max_compilations_per_minute', '1000'
    /* Enable regexes in painless so our tests don't complain about example
     * snippets that use them. */
    setting 'script.painless.regex.enabled', 'true'
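    // Copies the named file from src/test/cluster/config into the test
    // cluster's config directory, keeping the same relative path.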
    Closure configFile = {
      extraConfigFile it, "src/test/cluster/config/$it"
    }
    configFile 'scripts/my_script.painless'
    configFile 'scripts/my_init_script.painless'
    configFile 'scripts/my_map_script.painless'
    configFile 'scripts/my_combine_script.painless'
    configFile 'scripts/my_reduce_script.painless'
    configFile 'userdict_ja.txt'
    configFile 'KeywordTokenizer.rbbi'
    // Whitelist reindexing from the local node so we can test it.
    setting 'reindex.remote.whitelist', '127.0.0.1:*'
  }
}
// Build the cluster with all plugins
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
  /* Skip repositories. We just aren't going to be able to test them so it
   * doesn't make sense to waste time installing them. */
  if (subproj.path.startsWith(':plugins:repository-')) {
    return
  }
  subproj.afterEvaluate { // need to wait until the project has been configured
    integTest {
      cluster {
        plugin subproj.path
      }
    }
  }
}
buildRestTests.docs = fileTree(projectDir) {
  // No snippets in here!
  exclude 'build.gradle'
  // That is where the snippets go, not where they come from!
  exclude 'build'
  // This file simply doesn't pass yet. We should figure out how to fix it.
  exclude 'reference/modules/snapshots.asciidoc'
}
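// Registers a reusable setup under `name` that creates a `twitter` index
// and bulk-indexes `count` tweets into it.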
Closure setupTwitter = { String name, int count ->
  buildRestTests.setups[name] = '''
  - do:
      indices.create:
        index: twitter
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 1
          mappings:
            tweet:
              properties:
                user:
                  type: keyword
                  doc_values: true
                date:
                  type: date
                likes:
                  type: long
  - do:
      bulk:
        index: twitter
        type: tweet
        refresh: true
        body: |'''
  for (int i = 0; i < count; i++) {
    String user, text
    if (i == 0) {
      user = 'kimchy'
      text = 'trying out Elasticsearch'
    } else {
      user = 'test'
      text = "some message with the number $i"
    }
    buildRestTests.setups[name] += """
          {"index":{"_id": "$i"}}
          {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
  }
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
setupTwitter('huge_twitter', 1200)
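// Docs snippets opt into one of these setups with a comment like
// `// TEST[setup:big_twitter]` after the snippet.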
buildRestTests.setups['host'] = '''
  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
'''
// Used by scripted metric docs
buildRestTests.setups['ledger'] = '''
  - do:
      indices.create:
        index: ledger
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
                amount:
                  type: double
  - do:
      bulk:
        index: ledger
        type: item
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 200, "type": "sale", "description": "something"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 10, "type": "expense", "description": "another thing"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 150, "type": "sale", "description": "blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "cost of blah"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "amount": 50, "type": "expense", "description": "advertisement"}'''
// Used by pipeline aggregation docs
buildRestTests.setups['sales'] = '''
  - do:
      indices.create:
        index: sales
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            sale:
              properties:
                type:
                  type: keyword
  - do:
      bulk:
        index: sales
        type: sale
        refresh: true
        body: |
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "type": "hat"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 200, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/01/01 00:00:00", "price": 150, "type": "bag"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 50, "type": "hat"}
          {"index":{}}
          {"date": "2015/02/01 00:00:00", "price": 10, "type": "t-shirt"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 200, "type": "hat"}
          {"index":{}}
          {"date": "2015/03/01 00:00:00", "price": 175, "type": "t-shirt"}'''
// Dummy bank account data used by getting-started.asciidoc
buildRestTests.setups['bank'] = '''
  - do:
      bulk:
        index: bank
        type: account
        refresh: true
        body: |
#bank_data#
'''
/* Load the actual accounts only if we're going to use them. This complicates
 * dependency checking but that is a small price to pay for not building a
 * 400kb string every time we start the build. */
File accountsFile = new File("$projectDir/src/test/resources/accounts.json")
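// Register the accounts file as a task input so Gradle's up-to-date checks
// still see it even though it is only read at execution time.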
buildRestTests.inputs.file(accountsFile)
buildRestTests.doFirst {
  String accounts = accountsFile.getText('UTF-8')
  // Indent like a yaml test needs: deeper than the `body: |` key above
  accounts = accounts.replaceAll('(?m)^', '          ')
  buildRestTests.setups['bank'] =
    buildRestTests.setups['bank'].replace('#bank_data#', accounts)
}
buildRestTests.setups['range_index'] = '''
  - do:
      indices.create:
        index: range_index
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 1
          mappings:
            my_type:
              properties:
                expected_attendees:
                  type: integer_range
                time_frame:
                  type: date_range
                  format: yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis
  - do:
      bulk:
        index: range_index
        type: my_type
        refresh: true
        body: |
          {"index":{"_id": 1}}
          {"expected_attendees": {"gte": 10, "lte": 20}, "time_frame": {"gte": "2015-10-31 12:00:00", "lte": "2015-11-01"}}'''
// Used by index boost doc
buildRestTests.setups['index_boost'] = '''
  - do:
      indices.create:
        index: index1
  - do:
      indices.create:
        index: index2
  - do:
      indices.put_alias:
        index: index1
        name: alias1
'''