[Rollup] Proactively resolve index patterns in RollupSearch endpoint (#34930)

This changes the RollupSearch endpoint to proactively resolve index
patterns.  If the index pattern(s) match more than one rollup index,
an exception is thrown as before.  But if the pattern only matches one
rollup index, execution is allowed to continue (unlike before, where
it would assume all patterns were for raw data).
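
As a rough, self-contained Java sketch of the rule described above (the names partition, rollupIndices, and RollupPartitionSketch are made up for illustration; the real logic lives in TransportRollupSearchAction#separateIndices and reads rollup metadata from the cluster state rather than a plain set):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Simplified illustration only: once wildcards have been resolved to concrete
// index names, split them into rollup and non-rollup indices and enforce the
// "one rollup index at a time" rule.
public class RollupPartitionSketch {

    static void partition(String[] concreteIndices, Set<String> rollupIndices) {
        List<String> normal = new ArrayList<>();
        List<String> rollup = new ArrayList<>();
        for (String index : concreteIndices) {
            if (rollupIndices.contains(index)) {
                rollup.add(index);
            } else {
                normal.add(index);
            }
        }
        if (rollup.size() > 1) {
            // More than one matched rollup index is still an error, as before.
            throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup "
                    + "index at a time. Found the following rollup indices: " + rollup);
        }
        // Zero or one rollup index: execution is allowed to continue.
        System.out.println("rollup=" + rollup + ", raw=" + normal);
    }

    public static void main(String[] args) {
        Set<String> rollupIndices = Set.of("foo_rollup", "bar_rollup");
        partition(new String[] { "foo_rollup", "raw_logs" }, rollupIndices);        // allowed
        try {
            partition(new String[] { "foo_rollup", "bar_rollup" }, rollupIndices);  // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}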

This also allows the search endpoint to resolve aliases that point to
a rollup index.
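
A similarly simplified sketch of the alias case (the aliases map and AliasResolutionSketch are stand-ins; the endpoint actually delegates resolution to IndexNameExpressionResolver#concreteIndexNames, as the diff below shows). The alias is expanded to the concrete indices behind it before the one-rollup-index check runs, so an alias over a single rollup index now works, while an alias over several rollup indices is rejected:

import java.util.List;
import java.util.Map;

// Simplified illustration only: expand an alias to its backing indices before
// applying the one-rollup-index check from the sketch above.
public class AliasResolutionSketch {

    static String[] expand(String requested, Map<String, List<String>> aliases) {
        return aliases.getOrDefault(requested, List.of(requested)).toArray(new String[0]);
    }

    public static void main(String[] args) {
        Map<String, List<String>> aliases = Map.of("rollup_alias", List.of("foo_rollup"));
        // "rollup_alias" -> ["foo_rollup"]: a single rollup index, so the search proceeds.
        System.out.println(String.join(",", expand("rollup_alias", aliases)));
    }
}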

Also tweaks the documentation to make this clear.

Closes #34828
Zachary Tong 2018-10-30 13:50:50 -04:00 committed by GitHub
parent 70da490f34
commit f9dd33a0b9
5 changed files with 186 additions and 5 deletions


@@ -30,6 +30,7 @@ Rules for the `index` parameter:
or using `_all`, is not permitted
- Multiple non-rollup indices may be specified
- Only one rollup index may be specified. If more than one are supplied an exception will be thrown
- Index patterns may be used, but if they match more than one rollup index an exception will be thrown.
==== Request Body


@@ -21,6 +21,7 @@ follows:
or using `_all`, is not permitted
- Multiple non-rollup indices may be specified
- Only one rollup index may be specified. If more than one are supplied an exception will be thrown
- Index patterns may be used, but if they match more than one rollup index an exception will be thrown.
This limitation is driven by the logic that decides which jobs are the "best" for any given query. If you have ten jobs stored in a single
index, which cover the source data with varying degrees of completeness and different intervals, the query needs to determine which set


@@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
@@ -100,8 +101,9 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
    @Override
    protected void doExecute(Task task, SearchRequest request, ActionListener<SearchResponse> listener) {
        RollupSearchContext rollupSearchContext = separateIndices(request.indices(),
                clusterService.state().getMetaData().indices());
        IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(clusterService.getSettings());
        String[] indices = resolver.concreteIndexNames(clusterService.state(), request.indicesOptions(), request.indices());
        RollupSearchContext rollupSearchContext = separateIndices(indices, clusterService.state().getMetaData().indices());
        MultiSearchRequest msearch = createMSearchRequest(request, registry, rollupSearchContext);
@@ -401,9 +403,10 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
        });
        assert normal.size() + rollup.size() > 0;
        if (rollup.size() > 1) {
            throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup index at a time.");
            throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup index at a time. " +
                "Found the following rollup indices: " + rollup);
        }
        return new RollupSearchContext(normal.toArray(new String[normal.size()]), rollup.toArray(new String[rollup.size()]), jobCaps);
        return new RollupSearchContext(normal.toArray(new String[0]), rollup.toArray(new String[0]), jobCaps);
    }

    class TransportHandler implements TransportRequestHandler<SearchRequest> {


@@ -686,7 +686,8 @@ public class SearchActionTests extends ESTestCase {
        metaMap.put("bar", indexMeta);
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> TransportRollupSearchAction.separateIndices(indices, metaMap.build()));
        assertThat(e.getMessage(), equalTo("RollupSearch currently only supports searching one rollup index at a time."));
        assertThat(e.getMessage(), equalTo("RollupSearch currently only supports searching one rollup index at a time. " +
            "Found the following rollup indices: [foo, bar]"));
    }

    public void testEmptyMsearch() {


@@ -708,5 +708,180 @@ setup:
  - match: { aggregations.histo.buckets.3.doc_count: 10 }
  - match: { aggregations.histo.buckets.3.the_max.value: 3 }
---
"Wildcards matching single rollup index":
  - do:
      xpack.rollup.rollup_search:
        index: "foo_rollup*"
        body:
          size: 0
          aggs:
            histo:
              date_histogram:
                field: "timestamp"
                interval: "1h"
                time_zone: "UTC"
  - length: { aggregations.histo.buckets: 4 }
  - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
  - match: { aggregations.histo.buckets.0.doc_count: 1 }
  - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
  - match: { aggregations.histo.buckets.1.doc_count: 2 }
  - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
  - match: { aggregations.histo.buckets.2.doc_count: 10 }
  - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
  - match: { aggregations.histo.buckets.3.doc_count: 20 }
---
"Wildcards matching two rollup indices":
  - do:
      indices.create:
        index: bar
        body:
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                partition:
                  type: keyword
                price:
                  type: integer
  - do:
      headers:
        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
      xpack.rollup.put_job:
        id: bar
        body: >
          {
            "index_pattern": "bar",
            "rollup_index": "bar_rollup",
            "cron": "*/30 * * * * ?",
            "page_size" :10,
            "groups" : {
              "date_histogram": {
                "field": "timestamp",
                "interval": "1h"
              },
              "terms": {
                "fields": ["partition"]
              }
            },
            "metrics": [
              {
                "field": "price",
                "metrics": ["max"]
              }
            ]
          }
  - do:
      catch: /RollupSearch currently only supports searching one rollup index at a time\./
      xpack.rollup.rollup_search:
        index: "*_rollup"
        body:
          size: 0
          aggs:
            histo:
              date_histogram:
                field: "timestamp"
                interval: "1h"
                time_zone: "UTC"
---
"Rollup search via alias":
  - do:
      indices.put_alias:
        index: foo_rollup
        name: rollup_alias
  - do:
      xpack.rollup.rollup_search:
        index: "rollup_alias"
        body:
          size: 0
          aggs:
            histo:
              date_histogram:
                field: "timestamp"
                interval: "1h"
                time_zone: "UTC"
  - length: { aggregations.histo.buckets: 4 }
  - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" }
  - match: { aggregations.histo.buckets.0.doc_count: 1 }
  - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" }
  - match: { aggregations.histo.buckets.1.doc_count: 2 }
  - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" }
  - match: { aggregations.histo.buckets.2.doc_count: 10 }
  - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
  - match: { aggregations.histo.buckets.3.doc_count: 20 }
---
"Rollup search via alias, multiple rollup indices match":
  - do:
      indices.create:
        index: bar
        body:
          mappings:
            _doc:
              properties:
                timestamp:
                  type: date
                partition:
                  type: keyword
                price:
                  type: integer
  - do:
      headers:
        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
      xpack.rollup.put_job:
        id: bar
        body: >
          {
            "index_pattern": "bar",
            "rollup_index": "bar_rollup",
            "cron": "*/30 * * * * ?",
            "page_size" :10,
            "groups" : {
              "date_histogram": {
                "field": "timestamp",
                "interval": "1h"
              },
              "terms": {
                "fields": ["partition"]
              }
            },
            "metrics": [
              {
                "field": "price",
                "metrics": ["max"]
              }
            ]
          }
  - do:
      indices.put_alias:
        index: foo_rollup,bar_rollup
        name: rollup_alias
  - do:
      catch: /RollupSearch currently only supports searching one rollup index at a time\./
      xpack.rollup.rollup_search:
        index: "rollup_alias"
        body:
          size: 0
          aggs:
            histo:
              date_histogram:
                field: "timestamp"
                interval: "1h"
                time_zone: "UTC"