Change cluster info actions to be able to resolve data streams. (#57343)

Backport of #56878 to 7.x branch.

With this change the following APIs are able to resolve data streams: get index, get mappings and ILM explain.

Relates to #53100
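
The crux of the change is in TransportClusterInfoAction, which now passes the includeDataStreams flag when resolving index expressions, so a data stream name resolves to its backing indices instead of being rejected. Below is a minimal sketch of that resolution step, based on the resolver signature used in this diff; the wrapper class and method are illustrative, not part of the commit:

```java
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

/**
 * Sketch only: resolve a request that names a data stream (e.g. "logs-foobar")
 * into its concrete backing indices, such as "logs-foobar-000001".
 */
class DataStreamResolutionSketch {

    static String[] resolve(IndexNameExpressionResolver resolver, ClusterState state, IndicesRequest request) {
        // The trailing boolean is the includeDataStreams flag this commit threads through
        // the cluster info actions; passing true lets data stream names resolve.
        return resolver.concreteIndexNames(state, request, true);
    }
}
```
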
Martijn van Groningen 2020-05-29 12:17:53 +02:00 committed by GitHub
parent 322f953060
commit 04ef39da77
9 changed files with 131 additions and 58 deletions

View File

@@ -1,8 +1,8 @@
---
"Test apis that do not supported data streams":
- skip:
version: " - 7.99.99"
reason: "mute bwc until backported"
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings
- do:
@@ -40,11 +40,6 @@
indices.delete:
index: logs-foobar
- do:
catch: bad_request
indices.get:
index: logs-foobar
- do:
catch: bad_request
indices.put_settings:

View File

@@ -0,0 +1,48 @@
---
setup:
- skip:
features: allowed_warnings
- do:
allowed_warnings:
- "index template [logs_template] has index patterns [logs-foobar] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs_template] will take precedence during new index creation"
indices.put_index_template:
name: logs_template
body:
index_patterns: logs-foobar
data_stream:
timestamp_field: '@timestamp'
- do:
indices.create_data_stream:
name: logs-foobar
---
teardown:
- do:
indices.delete_data_stream:
name: logs-foobar
---
"Verify get index api":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
- do:
indices.get:
index: logs-foobar
- is_true: logs-foobar-000001
- is_false: logs-foobar
- match: { logs-foobar-000001.settings.index.number_of_shards: '1' }
---
"Verify get mapping api":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
- do:
indices.get_mapping:
index: logs-foobar
- is_true: logs-foobar-000001.mappings
- is_false: logs-foobar.mappings
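
The same checks can be made from Java. A hedged sketch follows, assuming the 7.x high-level REST client; the client wiring, class name and printed output are illustrative and not part of this commit:

```java
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.client.indices.GetIndexResponse;
import org.elasticsearch.client.indices.GetMappingsRequest;
import org.elasticsearch.client.indices.GetMappingsResponse;

import java.io.IOException;

class GetIndexOnDataStreamSketch {

    // Sketch only: with this change, asking for the data stream name returns its
    // backing indices (e.g. "logs-foobar-000001") instead of a bad_request error.
    static void printBackingIndicesAndMappings(RestHighLevelClient client) throws IOException {
        GetIndexResponse indexResponse =
            client.indices().get(new GetIndexRequest("logs-foobar"), RequestOptions.DEFAULT);
        for (String backingIndex : indexResponse.getIndices()) {
            System.out.println(backingIndex);
        }

        GetMappingsResponse mappingsResponse =
            client.indices().getMapping(new GetMappingsRequest().indices("logs-foobar"), RequestOptions.DEFAULT);
        // Mappings are keyed by backing index name, not by the data stream name.
        mappingsResponse.mappings().keySet().forEach(System.out::println);
    }
}
```
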

View File

@@ -235,11 +235,12 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(dataStreamName, client().admin().indices().prepareUpgradeStatus(dataStreamName), false);
verifyResolvability(dataStreamName, getAliases(dataStreamName), true);
verifyResolvability(dataStreamName, getFieldMapping(dataStreamName), true);
verifyResolvability(dataStreamName, getMapping(dataStreamName), true);
verifyResolvability(dataStreamName, getMapping(dataStreamName), false);
verifyResolvability(dataStreamName, getSettings(dataStreamName), false);
verifyResolvability(dataStreamName, health(dataStreamName), false);
verifyResolvability(dataStreamName, client().admin().cluster().prepareState().setIndices(dataStreamName), false);
verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false);
verifyResolvability(dataStreamName, client().admin().indices().prepareGetIndex().addIndices(dataStreamName), false);
request = new CreateDataStreamAction.Request("logs-barbaz");
client().admin().indices().createDataStream(request).actionGet();
@@ -263,11 +264,12 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(wildcardExpression, client().admin().indices().prepareUpgradeStatus(wildcardExpression), false);
verifyResolvability(wildcardExpression, getAliases(wildcardExpression), true);
verifyResolvability(wildcardExpression, getFieldMapping(wildcardExpression), true);
verifyResolvability(wildcardExpression, getMapping(wildcardExpression), true);
verifyResolvability(wildcardExpression, getMapping(wildcardExpression), false);
verifyResolvability(wildcardExpression, getSettings(wildcardExpression), false);
verifyResolvability(wildcardExpression, health(wildcardExpression), false);
verifyResolvability(wildcardExpression, client().admin().cluster().prepareState().setIndices(wildcardExpression), false);
verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false);
verifyResolvability(wildcardExpression, client().admin().indices().prepareGetIndex().addIndices(wildcardExpression), false);
}
private static void verifyResolvability(String dataStream, ActionRequestBuilder requestBuilder, boolean fail) {

View File

@@ -19,14 +19,11 @@
package org.elasticsearch.action.admin.indices.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -68,18 +65,6 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
this.indexScopedSettings = indexScopedSettings;
}
@Override
protected String executor() {
// very lightweight operation, no need to fork
return ThreadPool.Names.SAME;
}
@Override
protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected GetIndexResponse read(StreamInput in) throws IOException {
return new GetIndexResponse(in);

View File

@@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.service.ClusterService;
@@ -54,18 +52,6 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
this.indicesService = indicesService;
}
@Override
protected String executor() {
// very lightweight operation, no need to fork
return ThreadPool.Names.SAME;
}
@Override
protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected GetMappingsResponse read(StreamInput in) throws IOException {
return new GetMappingsResponse(in);

View File

@@ -23,13 +23,14 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeReadAction<Request, Response> {
@@ -45,9 +46,15 @@ public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequ
return ThreadPool.Names.SAME;
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request, true));
}
@Override
protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request, true);
doMasterOperation(request, concreteIndices, state, listener);
}

View File

@@ -122,12 +122,12 @@ public class GetIndexActionTests extends ESSingleNodeTestCase {
static class Resolver extends IndexNameExpressionResolver {
@Override
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
public String[] concreteIndexNames(ClusterState state, IndicesRequest request, boolean includeDataStreams) {
return request.indices();
}
@Override
public Index[] concreteIndices(ClusterState state, IndicesRequest request) {
public Index[] concreteIndices(ClusterState state, IndicesRequest request, boolean includeDataStreams) {
Index[] out = new Index[request.indices().length];
for (int x = 0; x < out.length; x++) {
out[x] = new Index(request.indices()[x], "_na_");

View File

@@ -11,8 +11,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
@@ -59,23 +57,11 @@ public class TransportExplainLifecycleAction
this.indexLifecycleService = indexLifecycleService;
}
@Override
protected String executor() {
// very lightweight operation, no need to fork
return ThreadPool.Names.SAME;
}
@Override
protected ExplainLifecycleResponse read(StreamInput in) throws IOException {
return new ExplainLifecycleResponse(in);
}
@Override
protected ClusterBlockException checkBlock(ExplainLifecycleRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void doMasterOperation(ExplainLifecycleRequest request, String[] concreteIndices, ClusterState state,
ActionListener<ExplainLifecycleResponse> listener) {

View File

@@ -1,8 +1,8 @@
---
"Verify data stream resolvability for xpack apis":
- skip:
version: " - 7.99.99"
reason: "mute bwc until backported"
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings
- do:
@@ -45,3 +45,67 @@
indices.delete_data_stream:
name: logs-foobar
- is_true: acknowledged
---
"Verify data stream resolvability in ilm explain api":
- skip:
version: " - 7.99.99"
reason: "wait until backported"
features: allowed_warnings
- do:
ilm.put_lifecycle:
policy: "my_lifecycle"
body: |
{
"policy": {
"phases": {
"warm": {
"min_age": "1000s",
"actions": {
"forcemerge": {
"max_num_segments": 10000
}
}
},
"hot": {
"min_age": "1000s",
"actions": { }
}
}
}
}
- do:
allowed_warnings:
- "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation"
indices.put_index_template:
name: generic_logs_template
body:
index_patterns: logs-*
data_stream:
timestamp_field: '@timestamp'
template:
settings:
index.lifecycle.name: "my_lifecycle"
mappings:
properties:
'@timestamp':
type: date
- do:
index:
index: logs-foobar
refresh: true
body: { foo: bar }
- do:
ilm.explain_lifecycle:
index: logs-foobar
- is_false: indices.logs-foobar.managed
- is_true: indices.logs-foobar-000001.managed
- do:
indices.delete_data_stream:
name: logs-foobar
- is_true: acknowledged
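
The ILM explain behavior can be checked from Java as well. A minimal sketch follows, assuming the 7.x high-level REST client; the class name and printed fields are illustrative, not part of this commit:

```java
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest;
import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse;
import org.elasticsearch.client.indexlifecycle.IndexLifecycleExplainResponse;

import java.io.IOException;
import java.util.Map;

class IlmExplainOnDataStreamSketch {

    // Sketch only: explaining the data stream name reports on its backing indices
    // (e.g. "logs-foobar-000001"), which are the indices ILM actually manages.
    static void printManagedIndices(RestHighLevelClient client) throws IOException {
        ExplainLifecycleRequest request = new ExplainLifecycleRequest("logs-foobar");
        ExplainLifecycleResponse response =
            client.indexLifecycle().explainLifecycle(request, RequestOptions.DEFAULT);
        for (Map.Entry<String, IndexLifecycleExplainResponse> entry : response.getIndexResponses().entrySet()) {
            System.out.println(entry.getKey() + " managed=" + entry.getValue().managedByILM());
        }
    }
}
```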