Make data streams a basic licensed feature. (#59392)
Backport of #59293 to 7.x branch.

* Create a new data-stream xpack module.
* Move TimestampFieldMapper to the new module; as a result, a composable index template with a data stream definition can only be stored when running the default distribution. This effectively limits data streams to the default distribution, since a data stream can currently only be created if a matching composable index template with a data stream definition exists.
* Rename the `_timestamp` meta field mapper to the `_data_stream_timestamp` meta field mapper.
* Make the put composable index template API fail if the `_data_stream_timestamp` meta field mapper isn't registered, so that a more understandable error is returned when attempting to store a template with a data stream definition via the OSS distribution.

In a follow-up, the data stream transport and REST actions can be moved to the xpack data-stream module. (The two-step flow gated here is sketched below, after the commit metadata.)
parent: cc9166a5ea
commit: b1b7bf3912
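As context for the diffs below: a data stream can only be created after a matching composable index template with a data stream definition has been stored. What follows is a minimal sketch of that two-step flow, assembled from API calls that appear verbatim in the FullRollingRestartIT diff further down; the template name "id_1" and stream name "ds" are taken from that test, and the snippet assumes it runs inside an ESIntegTestCase so that client() is available. With this change, step 1 only succeeds on the default distribution; on the OSS distribution it fails with "[index_template] unknown field [data_stream]", as asserted in ComposableTemplateIT below.

import org.elasticsearch.action.admin.indices.datastream.CreateDataStreamAction;
import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.compress.CompressedXContent;
import java.util.Collections;

// Step 1: store a composable index template whose data stream definition names
// the timestamp field. The _data_stream_timestamp meta field mapper (registered
// only by the new xpack data-stream module) must be present for this to succeed.
String mapping = "{\n" +
    "  \"properties\": {\n" +
    "    \"@timestamp\": {\n" +
    "      \"type\": \"date\"\n" +
    "    }\n" +
    "  }\n" +
    "}";
PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1");
request.indexTemplate(
    new ComposableIndexTemplate(
        Collections.singletonList("ds"),                               // index patterns the template matches
        new Template(null, new CompressedXContent(mapping), null),     // settings, mappings, aliases
        null, null, null, null,
        new ComposableIndexTemplate.DataStreamTemplate("@timestamp"))  // the data stream definition
);
client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet();

// Step 2: create the data stream; this works only because the matching
// template stored above carries a data stream definition.
client().admin().indices().createDataStream(new CreateDataStreamAction.Request("ds")).actionGet();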
@@ -1,100 +0,0 @@
"Verify rank eval with data streams":
  - skip:
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template
        body:
          index_patterns: [logs-*]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: logs-foobar
  - is_true: acknowledged

  - do:
      index:
        index: logs-foobar
        id: doc1
        op_type: create
        body: { "text": "berlin" }

  - do:
      index:
        index: logs-foobar
        id: doc2
        op_type: create
        body: { "text": "amsterdam" }

  # rollover data stream to split documents across multiple backing indices
  - do:
      indices.rollover:
        alias: "logs-foobar"

  - match: { old_index: .ds-logs-foobar-000001 }
  - match: { new_index: .ds-logs-foobar-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      index:
        index: logs-foobar
        id: doc3
        op_type: create
        body: { "text": "amsterdam" }

  - do:
      index:
        index: logs-foobar
        id: doc4
        op_type: create
        body: { "text": "something about amsterdam and berlin" }

  - do:
      indices.refresh:
        index: logs-foobar

  - do:
      rank_eval:
        index: logs-foobar
        search_type: query_then_fetch
        body: {
          "requests" : [
            {
              "id": "amsterdam_query",
              "request": { "query": { "match" : {"text" : "amsterdam" }}},
              "ratings": [
                {"_index": ".ds-logs-foobar-000001", "_id": "doc1", "rating": 0},
                {"_index": ".ds-logs-foobar-000001", "_id": "doc2", "rating": 1},
                {"_index": ".ds-logs-foobar-000002", "_id": "doc3", "rating": 1}]
            },
            {
              "id" : "berlin_query",
              "request": { "query": { "match" : { "text" : "berlin" } }, "size" : 10 },
              "ratings": [{"_index": ".ds-logs-foobar-000001", "_id": "doc1", "rating": 1}]
            }
          ],
          "metric" : { "precision": { "ignore_unlabeled" : true }}
        }

  - match: { metric_score: 1}
  - match: { details.amsterdam_query.metric_score: 1.0}
  - length: { details.amsterdam_query.hits: 3}
  - match: { details.berlin_query.metric_score: 1.0}

  - do:
      indices.delete_data_stream:
        name: logs-foobar
  - is_true: acknowledged
@@ -19,8 +19,6 @@

package org.elasticsearch.upgrades;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
@@ -28,16 +26,12 @@ import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.WarningFailureException;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -56,7 +50,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -1464,55 +1457,12 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
        assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
    }

    @SuppressWarnings("unchecked")
    public void testDataStreams() throws Exception {
        assumeTrue("no data streams in versions before " + Version.V_7_9_0, getOldClusterVersion().onOrAfter(Version.V_7_9_0));
        if (isRunningAgainstOldCluster()) {
            String mapping = "{\n" +
                "      \"properties\": {\n" +
                "        \"@timestamp\": {\n" +
                "          \"type\": \"date\"\n" +
                "        }\n" +
                "      }\n" +
                "    }";
            Template template = new Template(null, new CompressedXContent(mapping), null);
            createComposableTemplate(client(), "dst", "ds", template);

            Request indexRequest = new Request("POST", "/ds/_doc/1?op_type=create&refresh");
            XContentBuilder builder = JsonXContent.contentBuilder().startObject()
                .field("f", "v")
                .field("@timestamp", new Date())
                .endObject();
            indexRequest.setJsonEntity(Strings.toString(builder));
            assertOK(client().performRequest(indexRequest));
        }

        Request getDataStream = new Request("GET", "/_data_stream/ds");
        Response response = client().performRequest(getDataStream);
        assertOK(response);
        List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
        assertEquals(1, dataStreams.size());
        Map<String, Object> ds = (Map<String, Object>) dataStreams.get(0);
        List<Map<String, String>> indices = (List<Map<String, String>>) ds.get("indices");
        assertEquals("ds", ds.get("name"));
        assertEquals(1, indices.size());
        assertEquals(DataStream.getDefaultBackingIndexName("ds", 1), indices.get(0).get("index_name"));
        assertTotalHits(1, entityAsMap(client().performRequest(new Request("GET", "/ds/_search"))));
    public static void assertNumHits(String index, int numHits, int totalShards) throws IOException {
        Map<String, Object> resp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")));
        assertNoFailures(resp);
        assertThat(XContentMapValues.extractValue("_shards.total", resp), equalTo(totalShards));
        assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards));
        assertThat(extractTotalHits(resp), equalTo(numHits));
    }

    private static void createComposableTemplate(RestClient client, String templateName, String indexPattern, Template template)
        throws IOException {
        XContentBuilder builder = jsonBuilder();
        template.toXContent(builder, ToXContent.EMPTY_PARAMS);
        StringEntity templateJSON = new StringEntity(
            String.format(Locale.ROOT, "{\n" +
                "  \"index_patterns\": \"%s\",\n" +
                "  \"data_stream\": { \"timestamp_field\": \"@timestamp\" },\n" +
                "  \"template\": %s\n" +
                "}", indexPattern, Strings.toString(builder)),
            ContentType.APPLICATION_JSON);
        Request createIndexTemplateRequest = new Request("PUT", "_index_template/" + templateName);
        createIndexTemplateRequest.setEntity(templateJSON);
        client.performRequest(createIndexTemplateRequest);
    }
}
@@ -3,47 +3,6 @@

  - skip:
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'
  - do:
      allowed_warnings:
        - "index template [my-template2] has index patterns [simple-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template2
        body:
          index_patterns: [simple-data-stream2]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1

  - do:
      indices.create_data_stream:
        name: simple-data-stream2

  - do:
      indices.rollover:
        alias: "simple-data-stream2"

  - do:
      indices.create:
        index: single_doc_index
@@ -123,18 +82,6 @@
          nested2:
            type: keyword
            doc_values: false

  - do:
      indices.create:
        index: closed_index
        body:
          aliases:
            aliased_closed_index: {}

  - do:
      indices.close:
        index: closed_index

  - do:
      indices.create:
        index: test_index
@@ -109,47 +109,3 @@ setup:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 6

---
"Prohibit clone on data stream's write index":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      catch: bad_request
      indices.clone:
        index: ".ds-simple-data-stream1-000001"
        target: "target"
        wait_for_active_shards: 1
        master_timeout: 10s
        body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 2

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -1,64 +0,0 @@
---
"Test apis that do not support data streams":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template
        body:
          index_patterns: [logs-*]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: logs-foobar
  - is_true: acknowledged

  - do:
      index:
        index: logs-foobar
        refresh: true
        body:
          '@timestamp': '2020-12-12'
          foo: bar
  - match: {_index: .ds-logs-foobar-000001}

  - do:
      search:
        index: logs-foobar
        body: { query: { match_all: {} } }
  - length: { hits.hits: 1 }
  - match: { hits.hits.0._index: .ds-logs-foobar-000001 }
  - match: { hits.hits.0._source.foo: 'bar' }

  - do:
      catch: missing
      indices.delete:
        index: logs-foobar

  - do:
      indices.delete_data_stream:
        name: logs-foobar
  - is_true: acknowledged

---
"APIs temporarily muted":
  - skip:
      version: "all"
      reason: "restore to above test after data stream resolution PRs have been merged"

  - do:
      catch: bad_request
      indices.close:
        index: logs-*
@@ -1,53 +0,0 @@
---
setup:
  - skip:
      features: allowed_warnings
  - do:
      allowed_warnings:
        - "index template [logs_template] has index patterns [logs-foobar] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs_template] will take precedence during new index creation"
      indices.put_index_template:
        name: logs_template
        body:
          index_patterns: logs-foobar
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: logs-foobar

---
teardown:
  - do:
      indices.delete_data_stream:
        name: logs-foobar

---
"Verify get index api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      indices.get:
        index: logs-foobar
  - is_true: \.ds-logs-foobar-000001
  - is_false: logs-foobar
  - match: { \.ds-logs-foobar-000001.settings.index.number_of_shards: '1' }

---
"Verify get mapping api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      indices.get_mapping:
        index: logs-foobar
  - is_true: \.ds-logs-foobar-000001.mappings
  - is_false: \.ds-logs-foobar.mappings
@@ -1,38 +0,0 @@
---
"Data streams":
  - skip:
      features: allowed_warnings
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.get_field_mapping:
        index: simple-data-stream1
        fields: foo

  - is_true: \.ds-simple-data-stream1-000001

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -118,109 +118,3 @@
  - match: { indices.index_1.closed: true }
  - match: { indices.index_2.closed: true }
  - match: { indices.index_3.closed: true }

---
"Close write index for data stream fails":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      catch: bad_request
      indices.close:
        index: ".ds-simple-data-stream1-000001"

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

---
"Open write index for data stream opens all backing indices":
  - skip:
      version: " - 7.99.99"
      reason: "change to - 7.8.99 after backport"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  # rollover data stream twice to create new backing indices
  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000001 }
  - match: { new_index: .ds-simple-data-stream1-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000002 }
  - match: { new_index: .ds-simple-data-stream1-000003 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      indices.close:
        index: ".ds-simple-data-stream1-000001,.ds-simple-data-stream1-000002"
  - is_true: acknowledged

  - do:
      indices.open:
        index: simple-data-stream1
  - is_true: acknowledged

  # all closed backing indices should be re-opened and returned
  - do:
      indices.get:
        index: ".ds-simple-data-stream1-*"

  - is_true: \.ds-simple-data-stream1-000001.settings
  - is_true: \.ds-simple-data-stream1-000002.settings

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -5,47 +5,6 @@ setup:
      reason: "resolve index api only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'
  - do:
      allowed_warnings:
        - "index template [my-template2] has index patterns [simple-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template2
        body:
          index_patterns: [simple-data-stream2]
          template:
            mappings:
              properties:
                '@timestamp2':
                  type: date
          data_stream:
            timestamp_field: '@timestamp2'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1

  - do:
      indices.create_data_stream:
        name: simple-data-stream2

  - do:
      indices.rollover:
        alias: "simple-data-stream2"

  - do:
      indices.create:
        index: test_index1
@@ -74,7 +33,7 @@ setup:
        test_blias: {}

---
-"Resolve index with indices, aliases, and data streams":
+"Resolve index with indices and aliases":
  - skip:
      version: " - 7.8.99"
      reason: "resolve index api only supported in 7.9+"
@@ -99,23 +58,7 @@ setup:
  - match: {aliases.1.indices.1: test_index3}
  - match: {aliases.2.name: test_clias}
  - match: {aliases.2.indices.0: test_index1}
  - match: {data_streams.0.name: simple-data-stream1}
  - match: {data_streams.0.backing_indices.0: .ds-simple-data-stream1-000001}
  - match: {data_streams.0.timestamp_field: "@timestamp"}
  - match: {data_streams.1.name: simple-data-stream2}
  - match: {data_streams.1.backing_indices.0: .ds-simple-data-stream2-000001}
  - match: {data_streams.1.backing_indices.1: .ds-simple-data-stream2-000002}
  - match: {data_streams.1.timestamp_field: "@timestamp"}

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.delete_data_stream:
        name: simple-data-stream2
  - is_true: acknowledged
  - length: {data_streams: 0}

---
"Resolve index with hidden and closed indices":
@@ -159,20 +102,4 @@ setup:
  - match: {aliases.1.indices.1: test_index3}
  - match: {aliases.2.name: test_clias}
  - match: {aliases.2.indices.0: test_index1}
  - match: {data_streams.0.name: simple-data-stream1}
  - match: {data_streams.0.backing_indices.0: .ds-simple-data-stream1-000001}
  - match: {data_streams.0.timestamp_field: "@timestamp"}
  - match: {data_streams.1.name: simple-data-stream2}
  - match: {data_streams.1.backing_indices.0: .ds-simple-data-stream2-000001}
  - match: {data_streams.1.backing_indices.1: .ds-simple-data-stream2-000002}
  - match: {data_streams.1.timestamp_field: "@timestamp"}

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.delete_data_stream:
        name: simple-data-stream2
  - is_true: acknowledged
  - length: {data_streams: 0}
@@ -79,60 +79,3 @@
  - match: { indices.index1.shards.0.stores.0.allocation: "primary" }
  - match: { indices.index2.shards.0.stores.0.allocation: "primary" }
  - match: { indices.index2.shards.1.stores.0.allocation: "primary" }

---
"Data streams test":
  - skip:
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
            settings:
              number_of_shards: "1"
              number_of_replicas: "0"
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  # rollover data stream to create new backing index
  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000001 }
  - match: { new_index: .ds-simple-data-stream1-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.shard_stores:
        index: simple-data-stream1
        status: "green"

  - match: { indices.\.ds-simple-data-stream1-000001.shards.0.stores.0.allocation: "primary" }
  - match: { indices.\.ds-simple-data-stream1-000002.shards.0.stores.0.allocation: "primary" }

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -81,46 +81,3 @@
  - match: { _type: _doc }
  - match: { _id: "1" }
  - match: { _source: { foo: "hello world" } }

---
"Prohibit shrink on data stream's write index":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      catch: bad_request
      indices.shrink:
        index: ".ds-simple-data-stream1-000001"
        target: "target"
        wait_for_active_shards: 1
        master_timeout: 10s
        body:
          settings:
            index.number_of_replicas: 0

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -221,47 +221,3 @@ setup:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 6

---
"Prohibit split on data stream's write index":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      catch: bad_request
      indices.split:
        index: ".ds-simple-data-stream1-000001"
        target: "target"
        wait_for_active_shards: 1
        master_timeout: 10s
        body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 4

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -99,41 +99,3 @@
  - match: { shards.0.0.index: test_index }
  - match: { indices.test_index.aliases: [test_alias_no_filter]}
  - is_false: indices.test_index.filter

---
"Search shards on data streams":
  - skip:
      features: allowed_warnings
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      search_shards:
        index: "simple-data-stream1"

  - match: { shards.0.0.index: ".ds-simple-data-stream1-000001" }

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
@@ -24,26 +24,14 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.datastream.DeleteDataStreamAction;
import org.elasticsearch.action.admin.indices.datastream.GetDataStreamAction;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.IngestTestPlugin;
@@ -56,26 +44,20 @@ import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE;
import static org.elasticsearch.action.DocWriteResponse.Result.CREATED;
import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED;
import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamServiceTests.generateMapping;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasItemInArray;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.oneOf;

@@ -221,86 +203,4 @@ public class BulkIntegrationIT extends ESIntegTestCase {
        }
    }

    public void testMixedAutoCreate() throws Exception {
        PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo");
        createTemplateRequest.indexTemplate(
            new ComposableIndexTemplate(
                Collections.singletonList("logs-foo*"),
                new Template(null, new CompressedXContent(generateMapping("@timestamp")), null),
                null, null, null, null,
                new ComposableIndexTemplate.DataStreamTemplate("@timestamp"))
        );
        client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet();

        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("logs-foobar").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-foobaz").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barbaz").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barfoo").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet();
        assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false));

        bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("logs-foobar").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-foobaz2").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barbaz").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barfoo2").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkResponse = client().bulk(bulkRequest).actionGet();
        assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false));

        bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("logs-foobar").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-foobaz2").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-foobaz3").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barbaz").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barfoo2").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkRequest.add(new IndexRequest("logs-barfoo3").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON));
        bulkResponse = client().bulk(bulkRequest).actionGet();
        assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false));

        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[]{"*"});
        GetDataStreamAction.Response getDataStreamsResponse = client().admin().indices().getDataStreams(getDataStreamRequest).actionGet();
        assertThat(getDataStreamsResponse.getDataStreams(), hasSize(4));
        getDataStreamsResponse.getDataStreams().sort(Comparator.comparing(dataStreamInfo -> dataStreamInfo.getDataStream().getName()));
        assertThat(getDataStreamsResponse.getDataStreams().get(0).getDataStream().getName(), equalTo("logs-foobar"));
        assertThat(getDataStreamsResponse.getDataStreams().get(1).getDataStream().getName(), equalTo("logs-foobaz"));
        assertThat(getDataStreamsResponse.getDataStreams().get(2).getDataStream().getName(), equalTo("logs-foobaz2"));
        assertThat(getDataStreamsResponse.getDataStreams().get(3).getDataStream().getName(), equalTo("logs-foobaz3"));

        GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("logs-bar*")).actionGet();
        assertThat(getIndexResponse.getIndices(), arrayWithSize(4));
        assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barbaz"));
        assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo"));
        assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo2"));
        assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo3"));

        DeleteDataStreamAction.Request deleteDSReq = new DeleteDataStreamAction.Request(new String[]{"*"});
        client().execute(DeleteDataStreamAction.INSTANCE, deleteDSReq).actionGet();
        DeleteComposableIndexTemplateAction.Request deleteTemplateRequest = new DeleteComposableIndexTemplateAction.Request("*");
        client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteTemplateRequest).actionGet();
    }

    public void testAutoCreateV1TemplateNoDataStream() {
        Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build();

        PutIndexTemplateRequest v1Request = new PutIndexTemplateRequest("logs-foo");
        v1Request.patterns(Collections.singletonList("logs-foo*"));
        v1Request.settings(settings);
        v1Request.order(Integer.MAX_VALUE); // in order to avoid number_of_replicas being overwritten by random_template
        client().admin().indices().putTemplate(v1Request).actionGet();

        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("logs-foobar").opType(CREATE).source("{}", XContentType.JSON));
        BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet();
        assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false));

        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[]{"*"});
        GetDataStreamAction.Response getDataStreamsResponse = client().admin().indices().getDataStreams(getDataStreamRequest).actionGet();
        assertThat(getDataStreamsResponse.getDataStreams(), hasSize(0));

        GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("logs-foobar")).actionGet();
        assertThat(getIndexResponse.getIndices(), arrayWithSize(1));
        assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-foobar"));
        assertThat(getIndexResponse.getSettings().get("logs-foobar").get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS), equalTo("0"));
    }
}
@@ -19,7 +19,6 @@
package org.elasticsearch.indices;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
@@ -30,12 +29,10 @@ import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.search.MultiSearchRequestBuilder;
@@ -48,7 +45,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -706,20 +702,10 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
        return client().admin().indices().prepareGetMappings(indices);
    }

    static PutMappingRequestBuilder putMapping(String source, String... indices) {
        return client().admin().indices().preparePutMapping(indices)
            .setType("_doc")
            .setSource(source, XContentType.JSON);
    }

    static GetSettingsRequestBuilder getSettings(String... indices) {
        return client().admin().indices().prepareGetSettings(indices);
    }

    static UpdateSettingsRequestBuilder updateSettings(Settings.Builder settings, String... indices) {
        return client().admin().indices().prepareUpdateSettings(indices).setSettings(settings);
    }

    private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) {
        return client().admin().cluster().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices);
    }
@@ -731,15 +717,11 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
            .setIndices(indices);
    }

    static ClusterHealthRequestBuilder health(String... indices) {
        return client().admin().cluster().prepareHealth(indices);
    }

-    private static void verify(ActionRequestBuilder requestBuilder, boolean fail) {
+    private static void verify(ActionRequestBuilder<?, ?> requestBuilder, boolean fail) {
        verify(requestBuilder, fail, 0);
    }

-    private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) {
+    private static void verify(ActionRequestBuilder<?, ?> requestBuilder, boolean fail, long expectedCount) {
        if (fail) {
            if (requestBuilder instanceof MultiSearchRequestBuilder) {
                MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
@@ -24,11 +24,19 @@ import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTem
import org.elasticsearch.cluster.metadata.ComponentTemplate;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESIntegTestCase;

import java.io.IOException;
import java.util.Collections;

import static org.hamcrest.Matchers.equalTo;

public class ComposableTemplateIT extends ESIntegTestCase {

    // See: https://github.com/elastic/elasticsearch/issues/58643
@@ -83,4 +91,23 @@ public class ComposableTemplateIT extends ESIntegTestCase {
        client().execute(PutComposableIndexTemplateAction.INSTANCE,
            new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2)).get();
    }

    public void testUsageOfDataStreamFails() throws IOException {
        // Exception that would happen if an unknown field is provided in a composable template:
        // the thrown exception will be used to compare against the exception that is thrown when providing
        // a composable template with a data stream definition.
        String content = "{\"index_patterns\":[\"logs-*-*\"],\"my_field\":\"bla\"}";
        XContentParser parser =
            XContentHelper.createParser(xContentRegistry(), null, new BytesArray(content), XContentType.JSON);
        Exception expectedException = expectThrows(Exception.class, () -> ComposableIndexTemplate.parse(parser));

        ComposableIndexTemplate template = new ComposableIndexTemplate(List.of("logs-*-*"), null, null, null, null,
            null, new ComposableIndexTemplate.DataStreamTemplate("@timestamp"));
        Exception e = expectThrows(IllegalArgumentException.class, () -> client().execute(PutComposableIndexTemplateAction.INSTANCE,
            new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template)).actionGet());
        Exception actualException = (Exception) e.getCause();
        assertThat(actualException.getMessage(),
            equalTo(expectedException.getMessage().replace("[1:32] ", "").replace("my_field", "data_stream")));
        assertThat(actualException.getMessage(), equalTo("[index_template] unknown field [data_stream]"));
    }
}
@@ -19,20 +19,15 @@

package org.elasticsearch.recovery;

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.indices.datastream.CreateDataStreamAction;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.indices.recovery.RecoveryState;
@@ -40,12 +35,6 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

import java.util.Collections;
import java.util.Date;
import java.util.Map;

import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

@@ -68,50 +57,17 @@ public class FullRollingRestartIT extends ESIntegTestCase {
        internalCluster().startNode();
        createIndex("test");

        String mapping = "{\n" +
            "      \"properties\": {\n" +
            "        \"@timestamp\": {\n" +
            "          \"type\": \"date\"\n" +
            "        }\n" +
            "      }\n" +
            "    }";
        PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1");
        Settings settings = Settings.builder().put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), timeValueSeconds(5)).build();
        request.indexTemplate(
            new ComposableIndexTemplate(
                Collections.singletonList("ds"),
                new Template(settings, new CompressedXContent(mapping), null),
                null, null, null, null,
                new ComposableIndexTemplate.DataStreamTemplate("@timestamp"))
        );
        client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet();
        client().admin().indices().createDataStream(new CreateDataStreamAction.Request("ds")).actionGet();

-        final String healthTimeout = "2m";
+        final String healthTimeout = "1m";

        for (int i = 0; i < 1000; i++) {
            client().prepareIndex("test", "type1", Long.toString(i))
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
        }
        for (int i = 2000; i < 3000; i++) {
            Map<String, Object> source = MapBuilder.<String, Object>newMapBuilder()
                .put("test", "value" + i)
                .put("@timestamp", new Date()).map();
            client().prepareIndex("ds", "_doc").setId(Long.toString(i)).setOpType(DocWriteRequest.OpType.CREATE)
                .setSource(source).execute().actionGet();
        }
        flush();
        for (int i = 1000; i < 2000; i++) {
            client().prepareIndex("test", "type1", Long.toString(i))
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
        }
        for (int i = 3000; i < 4000; i++) {
            Map<String, Object> source = MapBuilder.<String, Object>newMapBuilder()
                .put("test", "value" + i)
                .put("@timestamp", new Date()).map();
            client().prepareIndex("ds", "_doc").setId(Long.toString(i)).setOpType(DocWriteRequest.OpType.CREATE)
                .setSource(source).execute().actionGet();
        }

        logger.info("--> now start adding nodes");
        internalCluster().startNode();
@@ -132,8 +88,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
        logger.info("--> refreshing and checking data");
        refresh();
        for (int i = 0; i < 10; i++) {
-            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 4000L);
-            assertHitCount(client().prepareSearch().setIndices("ds").setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
        }

        // now start shutting nodes down
@@ -150,8 +105,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
        logger.info("--> stopped two nodes, verifying data");
        refresh();
        for (int i = 0; i < 10; i++) {
-            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 4000L);
-            assertHitCount(client().prepareSearch().setIndices("ds").setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
        }

        // closing the 3rd node
@@ -169,8 +123,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
        logger.info("--> one node left, verifying data");
        refresh();
        for (int i = 0; i < 10; i++) {
-            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 4000L);
-            assertHitCount(client().prepareSearch().setIndices("ds").setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+            assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
        }
    }

@@ -186,7 +139,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
         */
        prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "6")
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
-            .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();
+            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();

        for (int i = 0; i < 100; i++) {
            client().prepareIndex("test", "type1", Long.toString(i))
@@ -24,7 +24,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
@@ -35,7 +34,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
import org.elasticsearch.action.admin.indices.datastream.DeleteDataStreamAction;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
@@ -52,7 +50,6 @@ import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress.State;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.MappingMetadata;
import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
@@ -79,7 +76,6 @@ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.DataStreamIT;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.ingest.IngestTestPlugin;
@@ -134,7 +130,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
@@ -2237,56 +2232,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
        }
    }

    public void testDeleteDataStreamDuringSnapshot() throws Exception {
        Client client = client();

        createRepository("test-repo", "mock", Settings.builder()
            .put("location", randomRepoPath()).put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
            .put("block_on_data", true));

        String dataStream = "datastream";
        DataStreamIT.putComposableIndexTemplate("dst", "@timestamp", Collections.singletonList(dataStream));

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            client.prepareIndex(dataStream, "_doc")
                .setOpType(DocWriteRequest.OpType.CREATE)
                .setId(Integer.toString(i))
                .setSource(Collections.singletonMap("@timestamp", "2020-12-12"))
                .execute().actionGet();
        }
        refresh();
        assertDocCount(dataStream, 100L);

        logger.info("--> snapshot");
        ActionFuture<CreateSnapshotResponse> future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
            .setIndices(dataStream).setWaitForCompletion(true).setPartial(false).execute();
        logger.info("--> wait for block to kick in");
        waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1));

        // non-partial snapshots do not allow delete operations on data streams while the snapshot has not completed
        try {
            logger.info("--> delete index while non-partial snapshot is running");
            client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{dataStream})).actionGet();
            fail("Expected deleting index to fail during snapshot");
        } catch (SnapshotInProgressException e) {
            assertThat(e.getMessage(), containsString("Cannot delete data streams that are being snapshotted: [" + dataStream));
        } finally {
            logger.info("--> unblock all data nodes");
            unblockAllDataNodes("test-repo");
        }
        logger.info("--> waiting for snapshot to finish");
        CreateSnapshotResponse createSnapshotResponse = future.get();

        logger.info("Snapshot successfully completed");
        SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
        assertThat(snapshotInfo.state(), equalTo((SnapshotState.SUCCESS)));
        assertThat(snapshotInfo.dataStreams(), contains(dataStream));
        assertThat(snapshotInfo.indices(), contains(DataStream.getDefaultBackingIndexName(dataStream, 1)));
    }

    public void testCloseOrDeleteIndexDuringSnapshot() throws Exception {
        disableRepoConsistencyCheck("This test intentionally leaves a broken repository");
@@ -33,7 +33,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MapperService;
- import org.elasticsearch.index.mapper.TimestampFieldMapper;

import java.io.IOException;
import java.util.Collections;
@@ -278,10 +277,10 @@ public class ComposableIndexTemplate extends AbstractDiffable<ComposableIndexTem
}

/**
- * @return a mapping snippet for a backing index with `_timestamp` meta field mapper properly configured.
+ * @return a mapping snippet for a backing index with `_data_stream_timestamp` meta field mapper properly configured.
*/
public Map<String, Object> getDataSteamMappingSnippet() {
- return singletonMap(MapperService.SINGLE_MAPPING_NAME, singletonMap(TimestampFieldMapper.NAME,
+ return singletonMap(MapperService.SINGLE_MAPPING_NAME, singletonMap("_data_stream_timestamp",
singletonMap("path", timestampField)));
}
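For reference, the renamed snippet resolves to the structure below. This is a minimal, self-contained sketch using plain java.util maps rather than the actual ComposableIndexTemplate class; "_doc" stands in for MapperService.SINGLE_MAPPING_NAME.

import java.util.Collections;
import java.util.Map;

public class MappingSnippetSketch {
    // Builds {"_doc": {"_data_stream_timestamp": {"path": <timestampField>}}},
    // mirroring what getDataSteamMappingSnippet() returns for a backing index.
    public static Map<String, Object> snippet(String timestampField) {
        return Collections.singletonMap("_doc",
            Collections.singletonMap("_data_stream_timestamp",
                Collections.singletonMap("path", timestampField)));
    }

    public static void main(String[] args) {
        // prints: {_doc={_data_stream_timestamp={path=@timestamp}}}
        System.out.println(snippet("@timestamp"));
    }
}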
@@ -34,12 +34,16 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+ import org.elasticsearch.common.xcontent.ObjectPath;
import org.elasticsearch.index.mapper.MapperService;
- import org.elasticsearch.index.mapper.TimestampFieldMapper;
+ import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.Collections;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public class MetadataCreateDataStreamService {
@@ -169,12 +173,16 @@ public class MetadataCreateDataStreamService {
return composableIndexTemplate;
}

- public static void validateTimestampFieldMapping(String timestampFieldName, MapperService mapperService) {
- TimestampFieldMapper fieldMapper = (TimestampFieldMapper) mapperService.documentMapper().mappers().getMapper("_timestamp");
- assert fieldMapper != null : "[_timestamp] meta field mapper must exist";
+ public static void validateTimestampFieldMapping(String timestampFieldName, MapperService mapperService) throws IOException {
+ MetadataFieldMapper fieldMapper =
+ (MetadataFieldMapper) mapperService.documentMapper().mappers().getMapper("_data_stream_timestamp");
+ assert fieldMapper != null : "[_data_stream_timestamp] meta field mapper must exist";

- if (timestampFieldName.equals(fieldMapper.getPath()) == false) {
- throw new IllegalArgumentException("[_timestamp] meta field doesn't point to data stream timestamp field [" +
+ Map<String, Object> parsedTemplateMapping =
+ MapperService.parseMapping(NamedXContentRegistry.EMPTY, mapperService.documentMapper().mappingSource().string());
+ String configuredPath = ObjectPath.eval("_doc._data_stream_timestamp.path", parsedTemplateMapping);
+ if (timestampFieldName.equals(configuredPath) == false) {
+ throw new IllegalArgumentException("[_data_stream_timestamp] meta field doesn't point to data stream timestamp field [" +
timestampFieldName + "]");
}
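The reworked validation no longer asks the field mapper for its path; it re-parses the mapping source into a nested map and walks the dotted path "_doc._data_stream_timestamp.path". A hedged, dependency-free sketch of that lookup (ObjectPath.eval in the real code; the hand-rolled eval below is only an illustration):

import java.util.HashMap;
import java.util.Map;

public class TimestampValidationSketch {
    // Walks a dotted path through nested maps, returning null on any miss.
    @SuppressWarnings("unchecked")
    static String eval(String dottedPath, Map<String, Object> source) {
        Object current = source;
        for (String key : dottedPath.split("\\.")) {
            if ((current instanceof Map) == false) {
                return null;
            }
            current = ((Map<String, Object>) current).get(key);
        }
        return current instanceof String ? (String) current : null;
    }

    public static void main(String[] args) {
        // Equivalent of a parsed template mapping with the meta field configured.
        Map<String, Object> meta = new HashMap<>();
        meta.put("path", "@timestamp");
        Map<String, Object> doc = new HashMap<>();
        doc.put("_data_stream_timestamp", meta);
        Map<String, Object> parsed = new HashMap<>();
        parsed.put("_doc", doc);

        String configuredPath = eval("_doc._data_stream_timestamp.path", parsed);
        if ("@timestamp".equals(configuredPath) == false) {
            throw new IllegalArgumentException("[_data_stream_timestamp] meta field doesn't point to " +
                "data stream timestamp field [@timestamp]");
        }
        System.out.println("configured path: " + configuredPath);
    }
}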
@@ -50,6 +50,7 @@ import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+ import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
@@ -1100,6 +1101,17 @@ public class MetadataIndexTemplateService {
// triggers inclusion of _timestamp field and its validation:
String indexName = DataStream.BACKING_INDEX_PREFIX + temporaryIndexName;
// Parse mappings to ensure they are valid after being composed

+ if (template.getDataStreamTemplate() != null) {
+ // If there is no _data_stream_timestamp meta field mapper and a data stream should be created then
+ // fail as if the data_stream field can't be parsed:
+ if (tempIndexService.mapperService().isMetadataField("_data_stream_timestamp") == false) {
+ // Fail like a parsing exception, since we will be moving the data_stream template out of the server module
+ // and would then fail with the same error message, like we do here.
+ throw new XContentParseException("[index_template] unknown field [data_stream]");
+ }
+ }

List<CompressedXContent> mappings = collectMappings(stateWithIndex, templateName, indexName);
try {
MapperService mapperService = tempIndexService.mapperService();
@@ -119,7 +119,7 @@ class MapperMergeValidator {
DocumentMapper newMapper) {
validateCopyTo(fieldMappers, fullPathObjectMappers, fieldTypes);
validateFieldAliasTargets(fieldAliasMappers, fullPathObjectMappers);
- validateTimestampFieldMapper(metadataMappers, newMapper);
+ validateMetadataFieldMappers(metadataMappers, newMapper);
}

private static void validateCopyTo(List<FieldMapper> fieldMappers,
@@ -174,11 +174,9 @@ class MapperMergeValidator {
}
}

- private static void validateTimestampFieldMapper(MetadataFieldMapper[] metadataMappers, DocumentMapper newMapper) {
+ private static void validateMetadataFieldMappers(MetadataFieldMapper[] metadataMappers, DocumentMapper newMapper) {
for (MetadataFieldMapper metadataFieldMapper : metadataMappers) {
- if (metadataFieldMapper instanceof TimestampFieldMapper) {
- ((TimestampFieldMapper) metadataFieldMapper).validate(newMapper.mappers());
- }
+ metadataFieldMapper.validate(newMapper.mappers());
}
}
@@ -206,7 +206,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
/**
* Parses the mappings (formatted as JSON) into a map
*/
- public static Map<String, Object> parseMapping(NamedXContentRegistry xContentRegistry, String mappingSource) throws Exception {
+ public static Map<String, Object> parseMapping(NamedXContentRegistry xContentRegistry, String mappingSource) throws IOException {
try (XContentParser parser = XContentType.JSON.xContent()
.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, mappingSource)) {
return parser.map();
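The signature change narrows `throws Exception` to `throws IOException`, which is all the JSON parser can actually throw here. A hedged sketch of the JSON-to-map behavior, using Jackson (which Elasticsearch bundles) instead of the XContent machinery, purely for illustration:

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

public class ParseMappingSketch {
    public static void main(String[] args) throws Exception {
        String mappingSource = "{\"_doc\":{\"properties\":{\"@timestamp\":{\"type\":\"date\"}}}}";
        // Like parseMapping: JSON text in, nested Map out.
        @SuppressWarnings("unchecked")
        Map<String, Object> parsed = new ObjectMapper().readValue(mappingSource, Map.class);
        System.out.println(parsed.get("_doc"));
    }
}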
@@ -69,6 +69,14 @@ public abstract class MetadataFieldMapper extends FieldMapper {
super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty());
}

+ /**
+ * Called when mapping gets merged. Provides the opportunity to validate other fields a metadata field mapper
+ * is supposed to work with before a mapping update is completed.
+ */
+ public void validate(DocumentFieldMappers lookup) {
+ // noop by default
+ }

/**
* Called before {@link FieldMapper#parse(ParseContext)} on the {@link RootObjectMapper}.
*/
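The new hook lets any metadata field mapper veto a mapping update. A hedged sketch of an override in the spirit of the timestamp mapper; the real signature takes a DocumentFieldMappers lookup, which a plain field-name list stands in for here so the example is self-contained:

import java.util.Arrays;

public class ValidateHookSketch {
    abstract static class MetadataMapper {
        // noop by default, exactly like the base-class method in the diff
        public void validate(Iterable<String> fieldNames) {
        }
    }

    static class TimestampMetaMapper extends MetadataMapper {
        private final String path;

        TimestampMetaMapper(String path) {
            this.path = path;
        }

        @Override
        public void validate(Iterable<String> fieldNames) {
            for (String name : fieldNames) {
                if (name.equals(path)) {
                    return; // the configured timestamp field exists
                }
            }
            throw new IllegalArgumentException(
                "the configured timestamp field [" + path + "] does not exist");
        }
    }

    public static void main(String[] args) {
        new TimestampMetaMapper("@timestamp").validate(Arrays.asList("@timestamp", "message"));
        System.out.println("mapping update accepted");
    }
}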
@@ -51,7 +51,6 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
- import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction;
@@ -164,7 +163,6 @@ public class IndicesModule extends AbstractModule {
builtInMetadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser());
builtInMetadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser());
builtInMetadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser());
- builtInMetadataMappers.put(TimestampFieldMapper.NAME, new TimestampFieldMapper.TypeParser());
//_field_names must be added last so that it has a chance to see all the other mappers
builtInMetadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser());
return Collections.unmodifiableMap(builtInMetadataMappers);
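With the core registration gone, "_data_stream_timestamp" has to arrive through a plugin (the new x-pack module, or the DummyPlugin used in the tests further down) and be merged with the built-ins. A hedged sketch of that merge, with Runnable standing in for MetadataFieldMapper.TypeParser so it compiles on its own:

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class MetadataMapperRegistrySketch {
    public static Map<String, Runnable> merge(Map<String, Runnable> builtIn,
                                              Map<String, Runnable> fromPlugins) {
        Map<String, Runnable> merged = new LinkedHashMap<>(builtIn);
        for (Map.Entry<String, Runnable> entry : fromPlugins.entrySet()) {
            if (merged.put(entry.getKey(), entry.getValue()) != null) {
                throw new IllegalArgumentException(
                    "metadata mapper [" + entry.getKey() + "] is already registered");
            }
        }
        return Collections.unmodifiableMap(merged);
    }

    public static void main(String[] args) {
        Map<String, Runnable> builtIn = new LinkedHashMap<>();
        builtIn.put("_seq_no", () -> {});
        builtIn.put("_field_names", () -> {});
        Map<String, Runnable> plugin = Collections.singletonMap("_data_stream_timestamp", () -> {});
        // prints: [_seq_no, _field_names, _data_stream_timestamp]
        System.out.println(merge(builtIn, plugin).keySet());
    }
}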
@@ -60,10 +60,11 @@ import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
import org.elasticsearch.index.mapper.DocumentMapper;
+ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperService;
+ import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
- import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
@@ -81,7 +82,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;

- import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamServiceTests.generateMapping;
+ import static org.elasticsearch.cluster.DataStreamTestHelper.generateMapping;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -553,11 +554,14 @@ public class MetadataRolloverServiceTests extends ESTestCase {
.put("index.version.created", Version.CURRENT)
.build();
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(settings, new ContentPath(0));
- TimestampFieldMapper.Builder fieldBuilder = new TimestampFieldMapper.Builder();
- fieldBuilder.setPath("@timestamp");
DateFieldMapper dateFieldMapper = new DateFieldMapper.Builder("@timestamp").build(builderContext);
+ MetadataFieldMapper mockedTimestampField = mock(MetadataFieldMapper.class);
+ when(mockedTimestampField.name()).thenReturn("_data_stream_timestamp");
+ MappedFieldType mockedTimestampFieldType = mock(MappedFieldType.class);
+ when(mockedTimestampFieldType.name()).thenReturn("_data_stream_timestamp");
+ when(mockedTimestampField.fieldType()).thenReturn(mockedTimestampFieldType);
DocumentFieldMappers documentFieldMappers =
- new DocumentFieldMappers(Arrays.asList(fieldBuilder.build(builderContext), dateFieldMapper),
+ new DocumentFieldMappers(Arrays.asList(mockedTimestampField, dateFieldMapper),
Collections.emptyList(), new StandardAnalyzer());

ClusterService clusterService = ClusterServiceUtils.createClusterService(testThreadPool);
@@ -568,7 +572,8 @@ public class MetadataRolloverServiceTests extends ESTestCase {
DocumentMapper documentMapper = mock(DocumentMapper.class);
when(documentMapper.mappers()).thenReturn(documentFieldMappers);
when(documentMapper.type()).thenReturn("_doc");
- CompressedXContent mapping = new CompressedXContent(generateMapping(dataStream.getTimeStampField().getName()));
+ CompressedXContent mapping =
+ new CompressedXContent("{\"_doc\":" + generateMapping(dataStream.getTimeStampField().getName(), "date") + "}");
when(documentMapper.mappingSource()).thenReturn(mapping);
RoutingFieldMapper routingFieldMapper = mock(RoutingFieldMapper.class);
when(routingFieldMapper.required()).thenReturn(false);
@@ -25,16 +25,13 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.index.MapperTestUtils;
- import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESTestCase;

- import java.io.IOException;
import java.util.Collections;

import static org.elasticsearch.cluster.DataStreamTestHelper.createFirstBackingIndex;
import static org.elasticsearch.cluster.DataStreamTestHelper.createTimestampField;
- import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping;
+ import static org.elasticsearch.cluster.DataStreamTestHelper.generateMapping;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
@@ -152,34 +149,6 @@ public class MetadataCreateDataStreamServiceTests extends ESTestCase {
return MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req);
}

- public void testValidateTimestampFieldMapping() throws Exception {
- String mapping = generateMapping("@timestamp", "date");
- validateTimestampFieldMapping("@timestamp", createMapperService(mapping));
- mapping = generateMapping("@timestamp", "date_nanos");
- validateTimestampFieldMapping("@timestamp", createMapperService(mapping));
- }
-
- public void testValidateTimestampFieldMappingNoFieldMapping() {
- Exception e = expectThrows(IllegalArgumentException.class,
- () -> validateTimestampFieldMapping("@timestamp", createMapperService("{}")));
- assertThat(e.getMessage(),
- equalTo("[_timestamp] meta field doesn't point to data stream timestamp field [@timestamp]"));
-
- String mapping = generateMapping("@timestamp2", "date");
- e = expectThrows(IllegalArgumentException.class,
- () -> validateTimestampFieldMapping("@timestamp", createMapperService(mapping)));
- assertThat(e.getMessage(),
- equalTo("[_timestamp] meta field doesn't point to data stream timestamp field [@timestamp]"));
- }
-
- public void testValidateTimestampFieldMappingInvalidFieldType() {
- String mapping = generateMapping("@timestamp", "keyword");
- Exception e = expectThrows(IllegalArgumentException.class,
- () -> validateTimestampFieldMapping("@timestamp", createMapperService(mapping)));
- assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] is of type [keyword], " +
- "but [date,date_nanos] is expected"));
- }

private static MetadataCreateIndexService getMetadataCreateIndexService() throws Exception {
MetadataCreateIndexService s = mock(MetadataCreateIndexService.class);
when(s.applyCreateIndexRequest(any(ClusterState.class), any(CreateIndexClusterStateUpdateRequest.class), anyBoolean()))
@@ -203,35 +172,4 @@ public class MetadataCreateDataStreamServiceTests extends ESTestCase {
return s;
}

- public static String generateMapping(String timestampFieldName) {
- return generateMapping(timestampFieldName, "date");
- }
-
- static String generateMapping(String timestampFieldName, String type) {
- return "{\n" +
- "      \"_timestamp\": {\n" +
- "        \"path\": \"" + timestampFieldName + "\"\n" +
- "      }," +
- "      \"properties\": {\n" +
- "        \"" + timestampFieldName + "\": {\n" +
- "          \"type\": \"" + type + "\"\n" +
- "        }\n" +
- "      }\n" +
- "    }";
- }
-
- MapperService createMapperService(String mapping) throws IOException {
- String indexName = "test";
- IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
- .settings(Settings.builder()
- .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
- .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
- .putMapping("_doc", mapping)
- .build();
- MapperService mapperService =
- MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, indexName);
- mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
- return mapperService;
- }
}
@@ -19,6 +19,8 @@

package org.elasticsearch.cluster.metadata;

+ import org.apache.lucene.document.FieldType;
+ import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
@@ -33,21 +35,31 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
+ import org.elasticsearch.index.mapper.MappedFieldType;
+ import org.elasticsearch.index.mapper.Mapper;
+ import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
+ import org.elasticsearch.index.mapper.MetadataFieldMapper;
+ import org.elasticsearch.index.mapper.ParseContext;
+ import org.elasticsearch.index.mapper.TextSearchInfo;
+ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexTemplateException;
+ import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -75,6 +87,12 @@ import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.matchesRegex;

public class MetadataIndexTemplateServiceTests extends ESSingleNodeTestCase {

+ @Override
+ protected Collection<Class<? extends Plugin>> getPlugins() {
+ return Collections.singletonList(DummyPlugin.class);
+ }

public void testIndexTemplateInvalidNumberOfShards() {
PutRequest request = new PutRequest("test", "test_shards");
request.patterns(singletonList("test_shards*"));
@@ -1289,4 +1307,83 @@ public class MetadataIndexTemplateServiceTests extends ESSingleNodeTestCase {
}
}
}

+ // Composable index templates with a data_stream definition need the _timestamp meta field mapper.
+ // This is a dummy impl, so that tests don't fail on the _timestamp field not being found.
+ // (Tests using this dummy impl don't test the _timestamp validation, but need it to test other functionality.)
+ public static class DummyPlugin extends Plugin implements MapperPlugin {
+
+ @Override
+ public Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() {
+ return Collections.singletonMap("_data_stream_timestamp", new MetadataFieldMapper.TypeParser() {
+
+ @Override
+ public MetadataFieldMapper.Builder<?> parse(String name,
+ Map<String, Object> node,
+ ParserContext parserContext) throws MapperParsingException {
+ String path = (String) node.remove("path");
+ return new MetadataFieldMapper.Builder(name, new FieldType()) {
+ @Override
+ public MetadataFieldMapper build(Mapper.BuilderContext context) {
+ return newInstance(path);
+ }
+ };
+ }
+
+ @Override
+ public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext parserContext) {
+ return newInstance(null);
+ }
+
+ MetadataFieldMapper newInstance(String path) {
+ FieldType fieldType = new FieldType();
+ fieldType.freeze();
+ MappedFieldType mappedFieldType =
+ new MappedFieldType("_data_stream_timestamp", false, false, TextSearchInfo.NONE, Collections.emptyMap()) {
+ @Override
+ public String typeName() {
+ return "_data_stream_timestamp";
+ }
+
+ @Override
+ public Query termQuery(Object value, QueryShardContext context) {
+ return null;
+ }
+
+ @Override
+ public Query existsQuery(QueryShardContext context) {
+ return null;
+ }
+ };
+ return new MetadataFieldMapper(fieldType, mappedFieldType) {
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (path == null) {
+ return builder;
+ }
+
+ builder.startObject(simpleName());
+ builder.field("path", path);
+ return builder.endObject();
+ }
+
+ @Override
+ protected String contentType() {
+ return "_data_stream_timestamp";
+ }
+ };
+ }
+ });
+ }
+ }
}
@@ -29,16 +29,14 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
- import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;
+ import static org.elasticsearch.index.MapperTestUtils.assertConflicts;
import static org.hamcrest.Matchers.equalTo;

public class SourceFieldMapperTests extends ESSingleNodeTestCase {
@@ -121,19 +119,6 @@ public class SourceFieldMapperTests extends ESSingleNodeTestCase {
assertThat(sourceAsMap.containsKey("path2"), equalTo(true));
}

- static void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... conflicts) throws IOException {
- DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1));
- if (conflicts.length == 0) {
- docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE);
- } else {
- Exception e = expectThrows(IllegalArgumentException.class,
- () -> docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE));
- for (String conflict : conflicts) {
- assertThat(e.getMessage(), containsString(conflict));
- }
- }
- }

public void testEnabledNotUpdateable() throws Exception {
DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
// using default of true
@@ -1,202 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.mapper;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.io.IOException;

import static org.elasticsearch.index.mapper.SourceFieldMapperTests.assertConflicts;
import static org.hamcrest.Matchers.equalTo;

public class TimestampFieldMapperTests extends ESSingleNodeTestCase {

public void testPostParse() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type",
randomBoolean() ? "date" : "date_nanos").endObject().endObject()
.endObject().endObject());
DocumentMapper docMapper = createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);

ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("@timestamp", "2020-12-12")
.endObject()),
XContentType.JSON));
assertThat(doc.rootDoc().getFields("@timestamp").length, equalTo(2));

Exception e = expectThrows(MapperException.class, () -> docMapper.parse(new SourceToParse("test", "type", "1",
BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("@timestamp1", "2020-12-12")
.endObject()),
XContentType.JSON)));
assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] is missing"));

e = expectThrows(MapperException.class, () -> docMapper.parse(new SourceToParse("test", "type", "1",
BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.array("@timestamp", "2020-12-12", "2020-12-13")
.endObject()),
XContentType.JSON)));
assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] encountered multiple values"));
}

public void testValidateNonExistingField() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "non-existing-field").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date").endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), equalTo("the configured timestamp field [non-existing-field] does not exist"));
}

public void testValidateInvalidFieldType() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "keyword").endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(),
equalTo("the configured timestamp field [@timestamp] is of type [keyword], but [date,date_nanos] is expected"));
}

public void testValidateNotIndexed() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date").field("index", "false").endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] is not indexed"));
}

public void testValidateNotDocValues() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date").field("doc_values", "false").endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] doesn't have doc values"));
}

public void testValidateNullValue() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date")
.field("null_value", "2020-12-12").endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(),
equalTo("the configured timestamp field [@timestamp] has disallowed [null_value] attribute specified"));
}

public void testValidateIgnoreMalformed() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date").field("ignore_malformed", "true")
.endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(),
equalTo("the configured timestamp field [@timestamp] has disallowed [ignore_malformed] attribute specified"));
}

public void testValidateNotDisallowedAttribute() throws IOException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_timestamp").field("path", "@timestamp").endObject()
.startObject("properties").startObject("@timestamp").field("type", "date").field("store", "true")
.endObject().endObject()
.endObject().endObject());

Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(),
equalTo("the configured timestamp field [@timestamp] has disallowed attributes: [store]"));
}

public void testCannotUpdateTimestampField() throws IOException {
DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
String mapping1 = "{\"type\":{\"_timestamp\":{\"path\":\"@timestamp\"}, \"properties\": {\"@timestamp\": {\"type\": \"date\"}}}}}";
String mapping2 = "{\"type\":{\"_timestamp\":{\"path\":\"@timestamp2\"}, \"properties\": {\"@timestamp2\": {\"type\": \"date\"}," +
"\"@timestamp\": {\"type\": \"date\"}}}})";
assertConflicts(mapping1, mapping2, parser, "cannot update path setting for [_timestamp]");

mapping1 = "{\"type\":{\"properties\":{\"@timestamp\": {\"type\": \"date\"}}}}}";
mapping2 = "{\"type\":{\"_timestamp\":{\"path\":\"@timestamp2\"}, \"properties\": {\"@timestamp2\": {\"type\": \"date\"}," +
"\"@timestamp\": {\"type\": \"date\"}}}})";
assertConflicts(mapping1, mapping2, parser, "cannot update path setting for [_timestamp]");
}

public void testDifferentTSField() throws IOException {
String mapping = "{\n" +
"      \"_timestamp\": {\n" +
"        \"path\": \"event.my_timestamp\"\n" +
"      },\n" +
"      \"properties\": {\n" +
"        \"event\": {\n" +
"          \"properties\": {\n" +
"            \"my_timestamp\": {\n" +
"              \"type\": \"date\"" +
"            }\n" +
"          }\n" +
"        }\n" +
"      }\n" +
"    }";
DocumentMapper docMapper = createIndex("test").mapperService()
.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);

ParsedDocument doc = docMapper.parse(new SourceToParse("test", "_doc", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("event.my_timestamp", "2020-12-12")
.endObject()),
XContentType.JSON));
assertThat(doc.rootDoc().getFields("event.my_timestamp").length, equalTo(2));

Exception e = expectThrows(MapperException.class, () -> docMapper.parse(new SourceToParse("test", "_doc", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("event.timestamp", "2020-12-12")
.endObject()),
XContentType.JSON)));
assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [event.my_timestamp] is missing"));
}

}
@@ -21,7 +21,6 @@ package org.elasticsearch.indices;

import org.elasticsearch.Version;
import org.elasticsearch.index.mapper.AllFieldMapper;
- import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IgnoredFieldMapper;
@@ -89,11 +88,11 @@ public class IndicesModuleTests extends ESTestCase {

private static String[] EXPECTED_METADATA_FIELDS = new String[]{IgnoredFieldMapper.NAME, IdFieldMapper.NAME,
RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME,
- VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, TimestampFieldMapper.NAME, FieldNamesFieldMapper.NAME};
+ VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME};

private static String[] EXPECTED_METADATA_FIELDS_6x = new String[]{AllFieldMapper.NAME, IgnoredFieldMapper.NAME,
IdFieldMapper.NAME, RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME,
- VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, TimestampFieldMapper.NAME, FieldNamesFieldMapper.NAME};
+ VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME};

public void testBuiltinMappers() {
IndicesModule module = new IndicesModule(Collections.emptyList());
@@ -55,4 +55,27 @@ public final class DataStreamTestHelper {
public static DataStream.TimestampField createTimestampField(String fieldName) {
return new DataStream.TimestampField(fieldName);
}

+ public static String generateMapping(String timestampFieldName) {
+ return "{\n" +
+ "      \"properties\": {\n" +
+ "        \"" + timestampFieldName + "\": {\n" +
+ "          \"type\": \"date\"\n" +
+ "        }\n" +
+ "      }\n" +
+ "    }";
+ }
+
+ public static String generateMapping(String timestampFieldName, String type) {
+ return "{\n" +
+ "      \"_data_stream_timestamp\": {\n" +
+ "        \"path\": \"" + timestampFieldName + "\"\n" +
+ "      }," +
+ "      \"properties\": {\n" +
+ "        \"" + timestampFieldName + "\": {\n" +
+ "          \"type\": \"" + type + "\"\n" +
+ "        }\n" +
+ "      }\n" +
+ "    }";
+ }
}
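For a call like generateMapping("@timestamp", "date"), the two-argument helper expands to the JSON below (a direct restatement of the string concatenation above, not new behavior):

{
  "_data_stream_timestamp": {
    "path": "@timestamp"
  },
  "properties": {
    "@timestamp": {
      "type": "date"
    }
  }
}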
@@ -21,11 +21,15 @@ package org.elasticsearch.index;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
+ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.analysis.IndexAnalyzers;
+ import org.elasticsearch.index.mapper.DocumentMapper;
+ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperService;
+ import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -35,7 +39,10 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;

+ import static org.apache.lucene.util.LuceneTestCase.expectThrows;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
+ import static org.hamcrest.MatcherAssert.assertThat;
+ import static org.hamcrest.Matchers.containsString;


public class MapperTestUtils {
@@ -68,4 +75,20 @@ public class MapperTestUtils {
mapperRegistry,
() -> null, () -> false);
}

+ public static void assertConflicts(String mapping1,
+ String mapping2,
+ DocumentMapperParser parser,
+ String... conflicts) throws IOException {
+ DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1));
+ if (conflicts.length == 0) {
+ docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE);
+ } else {
+ Exception e = expectThrows(IllegalArgumentException.class,
+ () -> docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE));
+ for (String conflict : conflicts) {
+ assertThat(e.getMessage(), containsString(conflict));
+ }
+ }
+ }
}
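A hedged usage sketch for the relocated helper; `parser` would come from a test index's MapperService as in the callers above, and the conflict message is indicative 7.x wording rather than something verified here:

import java.io.IOException;

import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.mapper.DocumentMapperParser;

public class AssertConflictsUsageSketch {
    static void checkTypeChangeRejected(DocumentMapperParser parser) throws IOException {
        String mapping1 = "{\"type\":{\"properties\":{\"field\":{\"type\":\"keyword\"}}}}";
        String mapping2 = "{\"type\":{\"properties\":{\"field\":{\"type\":\"long\"}}}}";
        MapperTestUtils.assertConflicts(mapping1, mapping2, parser,
            "cannot be changed from type [keyword] to [long]");
    }
}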
@@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.equalTo;

public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase {

- private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
+ private static final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));

@Before
public void init() throws IOException {
@@ -59,11 +59,11 @@ public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase
}
}

- public final boolean isRunningAgainstOldCluster() {
+ public static boolean isRunningAgainstOldCluster() {
return runningAgainstOldCluster;
}

- private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
+ private static final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));

/**
* @return true if test is running against an old cluster before that last major, in this case
@@ -73,7 +73,7 @@ public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase
return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_7_0_0);
}

- public final Version getOldClusterVersion() {
+ public static Version getOldClusterVersion() {
return oldClusterVersion;
}

@@ -122,7 +122,7 @@ public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase
return true;
}

- protected void assertNoFailures(Map<?, ?> response) {
+ protected static void assertNoFailures(Map<?, ?> response) {
int failed = (int) XContentMapValues.extractValue("_shards.failed", response);
assertEquals(0, failed);
}
@@ -132,7 +132,7 @@ public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase
assertEquals(response.toString(), expectedTotalHits, actualTotalHits);
}

- protected int extractTotalHits(Map<?, ?> response) {
+ protected static int extractTotalHits(Map<?, ?> response) {
if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0)) {
return (Integer) XContentMapValues.extractValue("hits.total", response);
} else {
x-pack/plugin/data-streams/build.gradle (new file, +34)
@@ -0,0 +1,34 @@
import org.elasticsearch.gradle.info.BuildParams

evaluationDependsOn(xpackModule('core'))

apply plugin: 'elasticsearch.esplugin'
apply plugin: 'elasticsearch.internal-cluster-test'
esplugin {
name 'x-pack-data-streams'
description 'Elasticsearch Expanded Pack Plugin - Data Streams'
classname 'org.elasticsearch.xpack.datastreams.DataStreamsPlugin'
extendedPlugins = ['x-pack-core']
}
archivesBaseName = 'x-pack-data-streams'
integTest.enabled = false

tasks.named('internalClusterTest').configure {
if (BuildParams.isSnapshotBuild() == false) {
systemProperty 'es.datastreams_feature_enabled', 'true'
}
}

dependencies {
compileOnly project(path: xpackModule('core'), configuration: 'default')
testImplementation project(path: xpackModule('core'), configuration: 'testArtifacts')
}

// add all sub-projects of the qa sub-project
gradle.projectsEvaluated {
project.subprojects
.find { it.path == project.path + ":qa" }
.subprojects
.findAll { it.path.startsWith(project.path + ":qa") }
.each { check.dependsOn it.check }
}
x-pack/plugin/data-streams/qa/build.gradle (new file, +8)
@@ -0,0 +1,8 @@
import org.elasticsearch.gradle.test.RestIntegTestTask

apply plugin: 'elasticsearch.build'
test.enabled = false

dependencies {
api project(':test:framework')
}
x-pack/plugin/data-streams/qa/rest/build.gradle (new file, +20)
@@ -0,0 +1,20 @@
apply plugin: 'elasticsearch.testclusters'
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
apply plugin: 'elasticsearch.rest-resources'

restResources {
restApi {
includeCore 'bulk', 'count', 'search', '_common', 'indices', 'index', 'cluster', 'rank_eval', 'reindex', 'update_by_query', 'delete_by_query'
includeXpack 'enrich'
}
}

dependencies {
testImplementation project(path: xpackModule('data-streams'))
}

testClusters.integTest {
testDistribution = 'DEFAULT'
setting 'xpack.license.self_generated.type', 'basic'
}
@@ -0,0 +1,24 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/

package org.elasticsearch.xpack.datastreams;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class DataStreamsRestIT extends ESClientYamlSuiteTestCase {

public DataStreamsRestIT(final ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}

@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}

}
@@ -2,8 +2,8 @@
"DBQ from data stream":
- skip:
features: allowed_warnings
- version: " - 7.99.99"
- reason: "change to 7.8.99 after backport"
+ version: " - 7.8.99"
+ reason: "data streams only supported in 7.9+"

- do:
allowed_warnings:
@@ -282,8 +282,8 @@ setup:
---
"Indexing a document into a data stream without a timestamp field":
- skip:
- version: " - 7.9.99"
- reason: "enable in 7.9+ when backported"
+ version: " - 7.8.99"
+ reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
@@ -2,8 +2,8 @@
"Update by query from data stream":
- skip:
features: allowed_warnings
- version: " - 7.99.99"
- reason: "change to 7.8.99 after backport"
+ version: " - 7.8.99"
+ reason: "data streams only supported in 7.9+"

- do:
allowed_warnings:
@@ -0,0 +1,232 @@
---
"Test apis that do not support data streams":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
allowed_warnings:
- "index template [my-template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation"
indices.put_index_template:
name: my-template
body:
index_patterns: [logs-*]
template:
mappings:
properties:
'@timestamp':
type: date
data_stream:
timestamp_field: '@timestamp'

- do:
indices.create_data_stream:
name: logs-foobar
- is_true: acknowledged

- do:
index:
index: logs-foobar
refresh: true
body:
'@timestamp': '2020-12-12'
foo: bar
- match: {_index: .ds-logs-foobar-000001}

- do:
search:
index: logs-foobar
body: { query: { match_all: {} } }
- length: { hits.hits: 1 }
- match: { hits.hits.0._index: .ds-logs-foobar-000001 }
- match: { hits.hits.0._source.foo: 'bar' }

- do:
catch: missing
indices.delete:
index: logs-foobar

- do:
indices.delete_data_stream:
name: logs-foobar
- is_true: acknowledged

---
"Prohibit clone on data stream's write index":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
indices.put_index_template:
name: my-template1
body:
index_patterns: [simple-data-stream1]
template:
mappings:
properties:
'@timestamp':
type: date
data_stream:
timestamp_field: '@timestamp'

- do:
indices.create_data_stream:
name: simple-data-stream1
- is_true: acknowledged

- do:
catch: bad_request
indices.clone:
index: ".ds-simple-data-stream1-000001"
target: "target"
wait_for_active_shards: 1
master_timeout: 10s
body:
settings:
index.number_of_replicas: 0
index.number_of_shards: 2

- do:
indices.delete_data_stream:
name: simple-data-stream1
- is_true: acknowledged

---
"APIs temporarily muted":
- skip:
version: "all"
reason: "restore to above test after data stream resolution PRs have been merged"

- do:
catch: bad_request
indices.close:
index: logs-*

---
"Prohibit shrink on data stream's write index":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
indices.put_index_template:
name: my-template1
body:
index_patterns: [simple-data-stream1]
template:
mappings:
properties:
'@timestamp':
type: date
data_stream:
timestamp_field: '@timestamp'

- do:
indices.create_data_stream:
name: simple-data-stream1
- is_true: acknowledged

- do:
catch: bad_request
indices.shrink:
index: ".ds-simple-data-stream1-000001"
target: "target"
wait_for_active_shards: 1
master_timeout: 10s
body:
settings:
index.number_of_replicas: 0

- do:
indices.delete_data_stream:
name: simple-data-stream1
- is_true: acknowledged

---
"Close write index for data stream fails":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
indices.put_index_template:
name: my-template1
body:
index_patterns: [simple-data-stream1]
template:
mappings:
properties:
'@timestamp':
type: date
data_stream:
timestamp_field: '@timestamp'

- do:
indices.create_data_stream:
name: simple-data-stream1
- is_true: acknowledged

- do:
catch: bad_request
indices.close:
index: ".ds-simple-data-stream1-000001"

- do:
indices.delete_data_stream:
name: simple-data-stream1
- is_true: acknowledged

---
"Prohibit split on data stream's write index":
- skip:
version: " - 7.8.99"
reason: "data streams only supported in 7.9+"
features: allowed_warnings

- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
indices.put_index_template:
name: my-template1
body:
index_patterns: [simple-data-stream1]
template:
mappings:
properties:
'@timestamp':
type: date
data_stream:
timestamp_field: '@timestamp'

- do:
indices.create_data_stream:
name: simple-data-stream1
- is_true: acknowledged

- do:
catch: bad_request
indices.split:
index: ".ds-simple-data-stream1-000001"
target: "target"
wait_for_active_shards: 1
master_timeout: 10s
body:
settings:
index.number_of_replicas: 0
index.number_of_shards: 4

- do:
indices.delete_data_stream:
name: simple-data-stream1
- is_true: acknowledged
@ -0,0 +1,331 @@
---
setup:
  - skip:
      features: allowed_warnings
  - do:
      allowed_warnings:
        - "index template [logs_template] has index patterns [logs-foobar] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs_template] will take precedence during new index creation"
      indices.put_index_template:
        name: logs_template
        body:
          index_patterns: logs-foobar
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: logs-foobar

---
teardown:
  - do:
      indices.delete_data_stream:
        name: logs-foobar

---
"Verify get index api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

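  # a get on the data stream name returns the backing index under its own name, not under the data stream name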
  - do:
      indices.get:
        index: logs-foobar
  - is_true: \.ds-logs-foobar-000001
  - is_false: logs-foobar
  - match: { \.ds-logs-foobar-000001.settings.index.number_of_shards: '1' }

---
"Verify get mapping api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      indices.get_mapping:
        index: logs-foobar
  - is_true: \.ds-logs-foobar-000001.mappings
  - is_false: \.ds-logs-foobar.mappings

---
"Verify shard stores api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
            settings:
              number_of_shards: "1"
              number_of_replicas: "0"
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  # rollover data stream to create new backing index
  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000001 }
  - match: { new_index: .ds-simple-data-stream1-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      cluster.health:
        index: simple-data-stream1
        wait_for_status: green

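  # shard stores are reported per backing index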
  - do:
      indices.shard_stores:
        index: simple-data-stream1
        status: "green"

  - match: { indices.\.ds-simple-data-stream1-000001.shards.0.stores.0.allocation: "primary" }
  - match: { indices.\.ds-simple-data-stream1-000002.shards.0.stores.0.allocation: "primary" }

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged
---
"Verify search shards api":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

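  # the data stream name resolves to its backing index in the search shards response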
  - do:
      search_shards:
        index: "simple-data-stream1"

  - match: { shards.0.0.index: ".ds-simple-data-stream1-000001" }

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

---
"Verify get field mappings api":
  - skip:
      features: allowed_warnings
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.get_field_mapping:
        index: simple-data-stream1
        fields: foo

  - is_true: \.ds-simple-data-stream1-000001

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

---
"Open write index for data stream opens all backing indices":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  # rollover data stream twice to create new backing indices
  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000001 }
  - match: { new_index: .ds-simple-data-stream1-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      indices.rollover:
        alias: "simple-data-stream1"

  - match: { old_index: .ds-simple-data-stream1-000002 }
  - match: { new_index: .ds-simple-data-stream1-000003 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

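  # close the first two backing indices; opening the data stream should re-open them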
  - do:
      indices.close:
        index: ".ds-simple-data-stream1-000001,.ds-simple-data-stream1-000002"
  - is_true: acknowledged

  - do:
      indices.open:
        index: simple-data-stream1
  - is_true: acknowledged

  # all closed backing indices should be re-opened and returned
  - do:
      indices.get:
        index: ".ds-simple-data-stream1-*"

  - is_true: \.ds-simple-data-stream1-000001.settings
  - is_true: \.ds-simple-data-stream1-000002.settings

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

---
"Verify rank eval with data streams":
  - skip:
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
      index:
        index: logs-foobar
        id: doc1
        op_type: create
        body: { "text": "berlin", "@timestamp": "2020-01-01" }

  - do:
      index:
        index: logs-foobar
        id: doc2
        op_type: create
        body: { "text": "amsterdam", "@timestamp": "2020-01-01" }

  # rollover data stream to split documents across multiple backing indices
  - do:
      indices.rollover:
        alias: "logs-foobar"

  - match: { old_index: .ds-logs-foobar-000001 }
  - match: { new_index: .ds-logs-foobar-000002 }
  - match: { rolled_over: true }
  - match: { dry_run: false }

  - do:
      index:
        index: logs-foobar
        id: doc3
        op_type: create
        body: { "text": "amsterdam", "@timestamp": "2020-01-01" }

  - do:
      index:
        index: logs-foobar
        id: doc4
        op_type: create
        body: { "text": "something about amsterdam and berlin", "@timestamp": "2020-01-01" }

  - do:
      indices.refresh:
        index: logs-foobar

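  # ratings reference the concrete backing indices, because rank_eval reports hits by backing index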
  - do:
      rank_eval:
        index: logs-foobar
        search_type: query_then_fetch
        body: {
          "requests" : [
            {
              "id": "amsterdam_query",
              "request": { "query": { "match" : {"text" : "amsterdam" }}},
              "ratings": [
                {"_index": ".ds-logs-foobar-000001", "_id": "doc1", "rating": 0},
                {"_index": ".ds-logs-foobar-000001", "_id": "doc2", "rating": 1},
                {"_index": ".ds-logs-foobar-000002", "_id": "doc3", "rating": 1}]
            },
            {
              "id" : "berlin_query",
              "request": { "query": { "match" : { "text" : "berlin" } }, "size" : 10 },
              "ratings": [{"_index": ".ds-logs-foobar-000001", "_id": "doc1", "rating": 1}]
            }
          ],
          "metric" : { "precision": { "ignore_unlabeled" : true }}
        }

  - match: { metric_score: 1}
  - match: { details.amsterdam_query.metric_score: 1.0}
  - length: { details.amsterdam_query.hits: 3}
  - match: { details.berlin_query.metric_score: 1.0}
@ -0,0 +1,178 @@
---
setup:
  - skip:
version: "7.8.99 - "
|
||||
reason: "resolve index api only supported in 7.9+"
|
||||
features: allowed_warnings
|
||||
|
||||
- do:
|
||||
allowed_warnings:
|
||||
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
|
||||
indices.put_index_template:
|
||||
name: my-template1
|
||||
body:
|
||||
index_patterns: [simple-data-stream1]
|
||||
template:
|
||||
mappings:
|
||||
properties:
|
||||
'@timestamp':
|
||||
type: date
|
||||
data_stream:
|
||||
timestamp_field: '@timestamp'
|
||||
- do:
|
||||
allowed_warnings:
|
||||
- "index template [my-template2] has index patterns [simple-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation"
|
||||
indices.put_index_template:
|
||||
name: my-template2
|
||||
body:
|
||||
index_patterns: [simple-data-stream2]
|
||||
template:
|
||||
mappings:
|
||||
properties:
|
||||
'@timestamp2':
|
||||
type: date
|
||||
data_stream:
|
||||
timestamp_field: '@timestamp2'
|
||||
|
||||
- do:
|
||||
indices.create_data_stream:
|
||||
name: simple-data-stream1
|
||||
|
||||
- do:
|
||||
indices.create_data_stream:
|
||||
name: simple-data-stream2
|
||||
|
||||
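  # roll over simple-data-stream2 so it gets a second backing index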
  - do:
      indices.rollover:
        alias: "simple-data-stream2"

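  # create plain indices and aliases to be resolved alongside the data streams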
  - do:
      indices.create:
        index: test_index1
        body:
          aliases:
            test_alias: {}
            test_blias: {}
            test_clias: {}

  - do:
      indices.create:
        index: test_index2
        body:
          aliases:
            test_alias: {}

  - do:
      indices.close:
        index: test_index2

  - do:
      indices.create:
        index: test_index3
        body:
          aliases:
            test_blias: {}

---
"Resolve index with indices, aliases, and data streams":
  - skip:
      version: " - 7.8.99"
      reason: "resolve index api only supported in 7.9+"

  - do:
      indices.resolve_index:
        name: '*'

  - match: {indices.0.name: test_index1}
  - match: {indices.0.aliases.0: test_alias}
  - match: {indices.0.aliases.1: test_blias}
  - match: {indices.0.aliases.2: test_clias}
  - match: {indices.0.attributes.0: open}
  - match: {indices.1.name: test_index3}
  - match: {indices.1.aliases.0: test_blias}
  - match: {indices.1.attributes.0: open}
  - match: {aliases.0.name: test_alias}
  - match: {aliases.0.indices.0: test_index1}
  - match: {aliases.0.indices.1: test_index2}
  - match: {aliases.1.name: test_blias}
  - match: {aliases.1.indices.0: test_index1}
  - match: {aliases.1.indices.1: test_index3}
  - match: {aliases.2.name: test_clias}
  - match: {aliases.2.indices.0: test_index1}
  - match: {data_streams.0.name: simple-data-stream1}
  - match: {data_streams.0.backing_indices.0: .ds-simple-data-stream1-000001}
  - match: {data_streams.0.timestamp_field: "@timestamp"}
  - match: {data_streams.1.name: simple-data-stream2}
  - match: {data_streams.1.backing_indices.0: .ds-simple-data-stream2-000001}
  - match: {data_streams.1.backing_indices.1: .ds-simple-data-stream2-000002}
  - match: {data_streams.1.timestamp_field: "@timestamp2"}

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.delete_data_stream:
        name: simple-data-stream2
  - is_true: acknowledged

---
"Resolve index with hidden and closed indices":
  - skip:
version: " - 7.8.99"
|
||||
reason: change after backporting
|
||||
|
||||
  - do:
      indices.resolve_index:
        name: '*'
        expand_wildcards: [all]

  - match: {indices.0.name: .ds-simple-data-stream1-000001}
  - match: {indices.0.attributes.0: hidden}
  - match: {indices.0.attributes.1: open}
  - match: {indices.0.data_stream: simple-data-stream1}
  - match: {indices.1.name: .ds-simple-data-stream2-000001}
  - match: {indices.1.attributes.0: hidden}
  - match: {indices.1.attributes.1: open}
  - match: {indices.1.data_stream: simple-data-stream2}
  - match: {indices.2.name: .ds-simple-data-stream2-000002}
  - match: {indices.2.attributes.0: hidden}
  - match: {indices.2.attributes.1: open}
  - match: {indices.2.data_stream: simple-data-stream2}
  - match: {indices.3.name: test_index1}
  - match: {indices.3.aliases.0: test_alias}
  - match: {indices.3.aliases.1: test_blias}
  - match: {indices.3.aliases.2: test_clias}
  - match: {indices.3.attributes.0: open}
  - match: {indices.4.name: test_index2}
  - match: {indices.4.aliases.0: test_alias}
  - match: {indices.4.attributes.0: closed}
  - match: {indices.5.name: test_index3}
  - match: {indices.5.aliases.0: test_blias}
  - match: {indices.5.attributes.0: open}
  - match: {aliases.0.name: test_alias}
  - match: {aliases.0.indices.0: test_index1}
  - match: {aliases.0.indices.1: test_index2}
  - match: {aliases.1.name: test_blias}
  - match: {aliases.1.indices.0: test_index1}
  - match: {aliases.1.indices.1: test_index3}
  - match: {aliases.2.name: test_clias}
  - match: {aliases.2.indices.0: test_index1}
  - match: {data_streams.0.name: simple-data-stream1}
  - match: {data_streams.0.backing_indices.0: .ds-simple-data-stream1-000001}
  - match: {data_streams.0.timestamp_field: "@timestamp"}
  - match: {data_streams.1.name: simple-data-stream2}
  - match: {data_streams.1.backing_indices.0: .ds-simple-data-stream2-000001}
  - match: {data_streams.1.backing_indices.1: .ds-simple-data-stream2-000002}
  - match: {data_streams.1.timestamp_field: "@timestamp2"}

  - do:
      indices.delete_data_stream:
        name: simple-data-stream1
  - is_true: acknowledged

  - do:
      indices.delete_data_stream:
        name: simple-data-stream2
  - is_true: acknowledged
@ -26,8 +26,8 @@ teardown:
---
"Reindex from data stream into another data stream":
  - skip:
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
@ -59,8 +59,8 @@ teardown:
---
"Reindex from index into data stream":
  - skip:
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
@ -92,8 +92,8 @@ teardown:
---
"Reindex from data stream into an index":
  - skip:
      version: " - 7.99.99"
      reason: "change to 7.8.99 after backport"
      version: " - 7.8.99"
      reason: "data streams only supported in 7.9+"
      features: allowed_warnings

  - do:
File diff suppressed because it is too large
@ -1,23 +1,9 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.snapshots;
package org.elasticsearch.datastreams;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.DocWriteRequest;
@ -34,12 +20,20 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.indices.DataStreamIT;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotRestoreException;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.xpack.datastreams.DataStreamsPlugin;
import org.hamcrest.Matchers;
import org.junit.Before;

import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
@ -61,6 +55,11 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {

    private String id;

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return List.of(MockRepository.Plugin.class, DataStreamsPlugin.class);
    }

    @Before
    public void setup() throws Exception {
        client = client();
@ -86,7 +85,8 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
    }

    public void testSnapshotAndRestore() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -101,10 +101,12 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        assertEquals(1, snap.size());
        assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), snap.get(0).indices());

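        // delete the data stream, then verify the restore below brings it back from the snapshot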
        assertTrue(client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"ds"})).get()
            .isAcknowledged());
        assertTrue(
            client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "ds" })).get().isAcknowledged()
        );

        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -117,56 +119,18 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        assertEquals(1, hits.length);
        assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap());

        GetDataStreamAction.Response ds = client.admin().indices().getDataStreams(
            new GetDataStreamAction.Request(new String[]{"ds"})).get();
        GetDataStreamAction.Response ds = client.admin()
            .indices()
            .getDataStreams(new GetDataStreamAction.Request(new String[] { "ds" }))
            .get();
        assertEquals(1, ds.getDataStreams().size());
        assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
        assertEquals(DS_BACKING_INDEX_NAME, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
    }

    public void testSnapshotAndRestoreAll() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
            .setIncludeGlobalState(false)
            .get();

        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        GetSnapshotsResponse snapshot = client.admin().cluster().prepareGetSnapshots(REPO).setSnapshots(SNAPSHOT).get();
        java.util.List<SnapshotInfo> snap = snapshot.getSnapshots();
        assertEquals(1, snap.size());
        assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), snap.get(0).indices());

        assertAcked(client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"*"})).get());
        assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));

        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setRestoreGlobalState(true)
            .get();

        assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards());

        assertEquals(DOCUMENT_SOURCE, client.prepareGet(DS_BACKING_INDEX_NAME, "_doc", id).get().getSourceAsMap());
        SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits();
        assertEquals(1, hits.length);
        assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap());

        GetDataStreamAction.Response ds = client.admin().indices().getDataStreams(
            new GetDataStreamAction.Request(new String[]{"ds"})).get();
        assertEquals(1, ds.getDataStreams().size());
        assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
        assertEquals(DS_BACKING_INDEX_NAME, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());

        assertAcked(client().admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"ds"})).get());
    }

    public void testRename() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -176,13 +140,58 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        expectThrows(SnapshotRestoreException.class, () -> client.admin().cluster()
        GetSnapshotsResponse snapshot = client.admin().cluster().prepareGetSnapshots(REPO).setSnapshots(SNAPSHOT).get();
        java.util.List<SnapshotInfo> snap = snapshot.getSnapshots();
        assertEquals(1, snap.size());
        assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), snap.get(0).indices());

        assertAcked(client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "*" })).get());
        assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));

        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
            .get());
            .setRestoreGlobalState(true)
            .get();

        client.admin().cluster()
        assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards());

        assertEquals(DOCUMENT_SOURCE, client.prepareGet(DS_BACKING_INDEX_NAME, "_doc", id).get().getSourceAsMap());
        SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits();
        assertEquals(1, hits.length);
        assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap());

        GetDataStreamAction.Response ds = client.admin()
            .indices()
            .getDataStreams(new GetDataStreamAction.Request(new String[] { "ds" }))
            .get();
        assertEquals(1, ds.getDataStreams().size());
        assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
        assertEquals(DS_BACKING_INDEX_NAME, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());

        assertAcked(client().admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "ds" })).get());
    }

    public void testRename() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
            .setIncludeGlobalState(false)
            .get();

        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        expectThrows(
            SnapshotRestoreException.class,
            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get()
        );

        client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -190,8 +199,10 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
            .setRenameReplacement("ds2")
            .get();

        GetDataStreamAction.Response ds = client.admin().indices().getDataStreams(
            new GetDataStreamAction.Request(new String[]{"ds2"})).get();
        GetDataStreamAction.Response ds = client.admin()
            .indices()
            .getDataStreams(new GetDataStreamAction.Request(new String[] { "ds2" }))
            .get();
        assertEquals(1, ds.getDataStreams().size());
        assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
        assertEquals(DS2_BACKING_INDEX_NAME, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
@ -200,7 +211,8 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
    }

    public void testBackingIndexIsNotRenamedWhenRestoringDataStream() {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -210,17 +222,17 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        expectThrows(SnapshotRestoreException.class, () -> client.admin().cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
            .get());
        expectThrows(
            SnapshotRestoreException.class,
            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get()
        );

        // delete data stream
        client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"ds"})).actionGet();
        client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "ds" })).actionGet();

        // restore data stream attempting to rename the backing index
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -230,13 +242,14 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {

        assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK));

        GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[]{"ds"});
        GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[] { "ds" });
        GetDataStreamAction.Response response = client.admin().indices().getDataStreams(getDSRequest).actionGet();
        assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(DS_BACKING_INDEX_NAME));
    }

    public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -246,14 +259,14 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        expectThrows(SnapshotRestoreException.class, () -> client.admin().cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
            .get());
        expectThrows(
            SnapshotRestoreException.class,
            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get()
        );

        // restore data stream attempting to rename the backing index
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -264,19 +277,22 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK));

        // assert "ds" was restored as "test-ds" and the backing index has a valid name
        GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(new String[]{"test-ds"});
        GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(new String[] { "test-ds" });
        GetDataStreamAction.Response response = client.admin().indices().getDataStreams(getRenamedDS).actionGet();
        assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(),
            is(DataStream.getDefaultBackingIndexName("test-ds", 1L)));
        assertThat(
            response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(),
            is(DataStream.getDefaultBackingIndexName("test-ds", 1L))
        );

        // data stream "ds" should still exist in the system
        GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[]{"ds"});
        GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[] { "ds" });
        response = client.admin().indices().getDataStreams(getDSRequest).actionGet();
        assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(DS_BACKING_INDEX_NAME));
    }

    public void testWildcards() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices("d*")
@ -286,7 +302,8 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices("d*")
@ -296,17 +313,23 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {

        assertEquals(RestStatus.OK, restoreSnapshotResponse.status());

        GetDataStreamAction.Response ds = client.admin().indices().getDataStreams(
            new GetDataStreamAction.Request(new String[]{"ds2"})).get();
        GetDataStreamAction.Response ds = client.admin()
            .indices()
            .getDataStreams(new GetDataStreamAction.Request(new String[] { "ds2" }))
            .get();
        assertEquals(1, ds.getDataStreams().size());
        assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size());
        assertEquals(DS2_BACKING_INDEX_NAME, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName());
        assertThat("we renamed the restored data stream to one that doesn't match any existing composable template",
            ds.getDataStreams().get(0).getIndexTemplate(), is(nullValue()));
        assertThat(
            "we renamed the restored data stream to one that doesn't match any existing composable template",
            ds.getDataStreams().get(0).getIndexTemplate(),
            is(nullValue())
        );
    }

    public void testDataStreamNotStoredWhenIndexRequested() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices(DS_BACKING_INDEX_NAME)
@ -315,15 +338,15 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {

        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);
        expectThrows(Exception.class, () -> client.admin().cluster()
            .prepareRestoreSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices("ds")
            .get());
        expectThrows(
            Exception.class,
            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds").get()
        );
    }

    public void testDataStreamNotRestoredWhenIndexRequested() throws Exception {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices("ds")
@ -333,10 +356,12 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
        RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
        assertEquals(RestStatus.OK, status);

        assertTrue(client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"ds"})).get()
            .isAcknowledged());
        assertTrue(
            client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "ds" })).get().isAcknowledged()
        );

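        // restore only the backing index pattern; the data stream definition itself is not requested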
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot(REPO, "snap2")
            .setWaitForCompletion(true)
            .setIndices(".ds-ds-*")
@ -344,25 +369,28 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {

        assertEquals(RestStatus.OK, restoreSnapshotResponse.status());

        GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[]{"ds"});
        GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "ds" });
        expectThrows(ResourceNotFoundException.class, () -> client.admin().indices().getDataStreams(getRequest).actionGet());
    }

    public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionException, InterruptedException {
        final String snapshotName = "test-snap";
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
            .prepareCreateSnapshot(REPO, snapshotName)
            .setWaitForCompletion(true)
            .setIndices("does-not-exist-*")
            .setIncludeGlobalState(true)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS));
        CreateSnapshotResponse createSnapshotResponse = client.admin()
            .cluster()
            .prepareCreateSnapshot(REPO, snapshotName)
            .setWaitForCompletion(true)
            .setIndices("does-not-exist-*")
            .setIncludeGlobalState(true)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().state(), Matchers.is(SnapshotState.SUCCESS));

        assertThat(client().admin().indices()
            .deleteDataStream(new DeleteDataStreamAction.Request(new String[]{"*"})).get().isAcknowledged(), is(true));
        assertThat(
            client().admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { "*" })).get().isAcknowledged(),
            is(true)
        );

        final RestoreSnapshotResponse restoreSnapshotResponse =
            client().admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).get();
        final RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).get();
        assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), empty());
    }

}
@ -0,0 +1,104 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.datastreams;

import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.indices.datastream.DeleteDataStreamAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
import org.elasticsearch.snapshots.SnapshotInProgressException;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.xpack.datastreams.DataStreamsPlugin;

import java.util.Collection;
import java.util.Collections;

import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

// The tests in here do a lot of state updates and other writes to disk and are slowed down too much by WindowsFS
@LuceneTestCase.SuppressFileSystems(value = "WindowsFS")
public class ShardClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return List.of(MockRepository.Plugin.class, DataStreamsPlugin.class);
    }

    public void testDeleteDataStreamDuringSnapshot() throws Exception {
        Client client = client();

        createRepository(
            "test-repo",
            "mock",
            Settings.builder()
                .put("location", randomRepoPath())
                .put("compress", randomBoolean())
                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
                .put("block_on_data", true)
        );

        String dataStream = "datastream";
        DataStreamIT.putComposableIndexTemplate("dst", "@timestamp", Collections.singletonList(dataStream));

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            client.prepareIndex(dataStream, "_doc")
                .setOpType(DocWriteRequest.OpType.CREATE)
                .setId(Integer.toString(i))
                .setSource(Collections.singletonMap("@timestamp", "2020-12-12"))
                .execute()
                .actionGet();
        }
        refresh();
        assertDocCount(dataStream, 100L);

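        // the mock repository is configured with block_on_data, so the snapshot stays in progress until the nodes are unblocked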
logger.info("--> snapshot");
|
||||
ActionFuture<CreateSnapshotResponse> future = client.admin()
|
||||
.cluster()
|
||||
.prepareCreateSnapshot("test-repo", "test-snap")
|
||||
.setIndices(dataStream)
|
||||
.setWaitForCompletion(true)
|
||||
.setPartial(false)
|
||||
.execute();
|
||||
logger.info("--> wait for block to kick in");
|
||||
waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1));
|
||||
|
||||
// non-partial snapshots do not allow delete operations on data streams where snapshot has not been completed
|
||||
try {
|
||||
logger.info("--> delete index while non-partial snapshot is running");
|
||||
client.admin().indices().deleteDataStream(new DeleteDataStreamAction.Request(new String[] { dataStream })).actionGet();
|
||||
fail("Expected deleting index to fail during snapshot");
|
||||
} catch (SnapshotInProgressException e) {
|
||||
assertThat(e.getMessage(), containsString("Cannot delete data streams that are being snapshotted: [" + dataStream));
|
||||
} finally {
|
||||
logger.info("--> unblock all data nodes");
|
||||
unblockAllDataNodes("test-repo");
|
||||
}
|
||||
logger.info("--> waiting for snapshot to finish");
|
||||
CreateSnapshotResponse createSnapshotResponse = future.get();
|
||||
|
||||
logger.info("Snapshot successfully completed");
|
||||
SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
|
||||
assertThat(snapshotInfo.state(), equalTo((SnapshotState.SUCCESS)));
|
||||
assertThat(snapshotInfo.dataStreams(), contains(dataStream));
|
||||
assertThat(snapshotInfo.indices(), contains(DataStream.getDefaultBackingIndexName(dataStream, 1)));
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.xpack.datastreams;
|
||||
|
||||
import org.elasticsearch.index.mapper.MetadataFieldMapper;
|
||||
import org.elasticsearch.xpack.datastreams.mapper.DataStreamTimestampFieldMapper;
|
||||
import org.elasticsearch.plugins.MapperPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.action.ActionModule.DATASTREAMS_FEATURE_ENABLED;
|
||||
|
||||
public class DataStreamsPlugin extends Plugin implements MapperPlugin {
|
||||
|
||||
@Override
|
||||
public Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() {
|
||||
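        // the meta field mapper is only registered when the data streams feature flag is enabled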
        if (DATASTREAMS_FEATURE_ENABLED) {
            return Collections.singletonMap(DataStreamTimestampFieldMapper.NAME, new DataStreamTimestampFieldMapper.TypeParser());
        } else {
            return Collections.emptyMap();
        }
    }
}
@ -1,23 +1,10 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.index.mapper;
package org.elasticsearch.xpack.datastreams.mapper;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.DocValuesType;
@ -28,6 +15,15 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TextSearchInfo;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
@ -41,11 +37,11 @@ import java.util.Objects;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

public class TimestampFieldMapper extends MetadataFieldMapper {
public class DataStreamTimestampFieldMapper extends MetadataFieldMapper {

    public static final String NAME = "_timestamp";
    public static final String NAME = "_data_stream_timestamp";

    public static class Defaults {

        public static final FieldType TIMESTAMP_FIELD_TYPE = new FieldType();

@ -94,20 +90,15 @@ public class TimestampFieldMapper extends MetadataFieldMapper {

        @Override
        public MetadataFieldMapper build(BuilderContext context) {
            return new TimestampFieldMapper(
                fieldType,
                new TimestampFieldType(),
                path
            );
            return new DataStreamTimestampFieldMapper(fieldType, new TimestampFieldType(), path);
        }
    }

    public static class TypeParser implements MetadataFieldMapper.TypeParser {

        @Override
        public MetadataFieldMapper.Builder<?> parse(String name,
                                                    Map<String, Object> node,
                                                    ParserContext parserContext) throws MapperParsingException {
        public MetadataFieldMapper.Builder<?> parse(String name, Map<String, Object> node, ParserContext parserContext)
            throws MapperParsingException {
            Builder builder = new Builder();
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
@ -123,14 +114,13 @@ public class TimestampFieldMapper extends MetadataFieldMapper {

        @Override
        public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext parserContext) {
            return new TimestampFieldMapper(Defaults.TIMESTAMP_FIELD_TYPE,
                new TimestampFieldType(), null);
            return new DataStreamTimestampFieldMapper(Defaults.TIMESTAMP_FIELD_TYPE, new TimestampFieldType(), null);
        }
    }

    private final String path;

    private TimestampFieldMapper(FieldType fieldType, MappedFieldType mappedFieldType, String path) {
    private DataStreamTimestampFieldMapper(FieldType fieldType, MappedFieldType mappedFieldType, String path) {
        super(fieldType, mappedFieldType);
        this.path = path;
    }
@ -146,11 +136,19 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
            throw new IllegalArgumentException("the configured timestamp field [" + path + "] does not exist");
        }

        if (DateFieldMapper.CONTENT_TYPE.equals(mapper.typeName()) == false &&
            DateFieldMapper.DATE_NANOS_CONTENT_TYPE.equals(mapper.typeName()) == false) {
            throw new IllegalArgumentException("the configured timestamp field [" + path + "] is of type [" +
                mapper.typeName() + "], but [" + DateFieldMapper.CONTENT_TYPE + "," + DateFieldMapper.DATE_NANOS_CONTENT_TYPE +
                "] is expected");
        if (DateFieldMapper.CONTENT_TYPE.equals(mapper.typeName()) == false
            && DateFieldMapper.DATE_NANOS_CONTENT_TYPE.equals(mapper.typeName()) == false) {
            throw new IllegalArgumentException(
                "the configured timestamp field ["
                    + path
                    + "] is of type ["
                    + mapper.typeName()
                    + "], but ["
                    + DateFieldMapper.CONTENT_TYPE
                    + ","
                    + DateFieldMapper.DATE_NANOS_CONTENT_TYPE
                    + "] is expected"
            );
        }

        DateFieldMapper dateFieldMapper = (DateFieldMapper) mapper;
@ -161,22 +159,24 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
            throw new IllegalArgumentException("the configured timestamp field [" + path + "] doesn't have doc values");
        }
        if (dateFieldMapper.getNullValue() != null) {
            throw new IllegalArgumentException("the configured timestamp field [" + path +
                "] has disallowed [null_value] attribute specified");
            throw new IllegalArgumentException(
                "the configured timestamp field [" + path + "] has disallowed [null_value] attribute specified"
            );
        }
        if (dateFieldMapper.getIgnoreMalformed().explicit()) {
            throw new IllegalArgumentException("the configured timestamp field [" + path +
                "] has disallowed [ignore_malformed] attribute specified");
            throw new IllegalArgumentException(
                "the configured timestamp field [" + path + "] has disallowed [ignore_malformed] attribute specified"
            );
        }

        // Catch all validation that validates whether disallowed mapping attributes have been specified
        // on the field this meta field refers to:
        try (XContentBuilder builder = jsonBuilder()) {
            builder.startObject();
            dateFieldMapper.doXContentBody(builder, false, EMPTY_PARAMS);
            dateFieldMapper.toXContent(builder, EMPTY_PARAMS);
            builder.endObject();
            Map<String, Object> configuredSettings =
                XContentHelper.convertToMap(BytesReference.bytes(builder), false, XContentType.JSON).v2();
            Map<?, ?> configuredSettings = XContentHelper.convertToMap(BytesReference.bytes(builder), false, XContentType.JSON).v2();
            configuredSettings = (Map<?, ?>) configuredSettings.values().iterator().next();

            // Only type, meta and format attributes are allowed:
            configuredSettings.remove("type");
@ -184,8 +184,9 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
            configuredSettings.remove("format");
            // All other configured attributes are not allowed:
            if (configuredSettings.isEmpty() == false) {
                throw new IllegalArgumentException("the configured timestamp field [@timestamp] has disallowed attributes: " +
                    configuredSettings.keySet());
                throw new IllegalArgumentException(
                    "the configured timestamp field [@timestamp] has disallowed attributes: " + configuredSettings.keySet()
                );
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
@ -197,8 +198,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
    }

    @Override
    public void preParse(ParseContext context) throws IOException {
    }
    public void preParse(ParseContext context) throws IOException {}

    @Override
    protected void parseCreateField(ParseContext context) throws IOException {
@ -218,10 +218,9 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
            throw new IllegalArgumentException("data stream timestamp field [" + path + "] is missing");
        }

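        // the timestamp field must be indexed exactly once; more than one doc value means multiple timestamp values were provided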
        long numberOfValues =
            Arrays.stream(fields)
                .filter(indexableField -> indexableField.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC)
                .count();
        long numberOfValues = Arrays.stream(fields)
            .filter(indexableField -> indexableField.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC)
            .count();
        if (numberOfValues > 1) {
            throw new IllegalArgumentException("data stream timestamp field [" + path + "] encountered multiple values");
        }
@ -255,9 +254,9 @@ public class TimestampFieldMapper extends MetadataFieldMapper {

    @Override
    protected void mergeOptions(FieldMapper other, List<String> conflicts) {
        TimestampFieldMapper otherTimestampFieldMapper = (TimestampFieldMapper) other;
        if (Objects.equals(path, otherTimestampFieldMapper.path) == false) {
            conflicts.add("cannot update path setting for [_timestamp]");
        }
        DataStreamTimestampFieldMapper otherTimestampFieldMapper = (DataStreamTimestampFieldMapper) other;
        if (Objects.equals(path, otherTimestampFieldMapper.path) == false) {
            conflicts.add("cannot update path setting for [_data_stream_timestamp]");
        }
    }
}
@ -0,0 +1,362 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.xpack.datastreams.mapper;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.xpack.datastreams.DataStreamsPlugin;

import java.io.IOException;
import java.util.Collection;

import static org.elasticsearch.index.MapperTestUtils.assertConflicts;
import static org.hamcrest.Matchers.equalTo;

public class DataStreamTimestampFieldMapperTests extends ESSingleNodeTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        return List.of(DataStreamsPlugin.class);
    }

    public void testPostParse() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", randomBoolean() ? "date" : "date_nanos")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );
        DocumentMapper docMapper = createIndex("test").mapperService()
            .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);

        ParsedDocument doc = docMapper.parse(
            new SourceToParse(
                "test",
                "type",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("@timestamp", "2020-12-12").endObject()),
                XContentType.JSON
            )
        );
        assertThat(doc.rootDoc().getFields("@timestamp").length, equalTo(2));

        Exception e = expectThrows(
            MapperException.class,
            () -> docMapper.parse(
                new SourceToParse(
                    "test",
                    "type",
                    "1",
                    BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("@timestamp1", "2020-12-12").endObject()),
                    XContentType.JSON
                )
            )
        );
        assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] is missing"));

        e = expectThrows(
            MapperException.class,
            () -> docMapper.parse(
                new SourceToParse(
                    "test",
                    "type",
                    "1",
                    BytesReference.bytes(
                        XContentFactory.jsonBuilder().startObject().array("@timestamp", "2020-12-12", "2020-12-13").endObject()
                    ),
                    XContentType.JSON
                )
            )
        );
        assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] encountered multiple values"));
    }

    public void testValidateNonExistingField() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "non-existing-field")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(e.getMessage(), equalTo("the configured timestamp field [non-existing-field] does not exist"));
    }

    public void testValidateInvalidFieldType() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "keyword")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(
            e.getMessage(),
            equalTo("the configured timestamp field [@timestamp] is of type [keyword], but [date,date_nanos] is expected")
        );
    }

    public void testValidateNotIndexed() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .field("index", "false")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] is not indexed"));
    }

    public void testValidateNotDocValues() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .field("doc_values", "false")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] doesn't have doc values"));
    }

    public void testValidateNullValue() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .field("null_value", "2020-12-12")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] has disallowed [null_value] attribute specified"));
    }

    public void testValidateIgnoreMalformed() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .field("ignore_malformed", "true")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(
            e.getMessage(),
            equalTo("the configured timestamp field [@timestamp] has disallowed [ignore_malformed] attribute specified")
        );
    }

    public void testValidateNotDisallowedAttribute() throws IOException {
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .field("store", "true")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );

        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> createIndex("test").mapperService()
                .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)
        );
        assertThat(e.getMessage(), equalTo("the configured timestamp field [@timestamp] has disallowed attributes: [store]"));
    }

    public void testCannotUpdateTimestampField() throws IOException {
        DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
        String mapping1 =
            "{\"type\":{\"_data_stream_timestamp\":{\"path\":\"@timestamp\"}, \"properties\": {\"@timestamp\": {\"type\": \"date\"}}}}}";
        String mapping2 = "{\"type\":{\"_data_stream_timestamp\":{\"path\":\"@timestamp2\"}, \"properties\": {\"@timestamp2\": "
            + "{\"type\": \"date\"},\"@timestamp\": {\"type\": \"date\"}}}})";
        assertConflicts(mapping1, mapping2, parser, "cannot update path setting for [_data_stream_timestamp]");

        mapping1 = "{\"type\":{\"properties\":{\"@timestamp\": {\"type\": \"date\"}}}}}";
        mapping2 = "{\"type\":{\"_data_stream_timestamp\":{\"path\":\"@timestamp2\"}, \"properties\": "
            + "{\"@timestamp2\": {\"type\": \"date\"},\"@timestamp\": {\"type\": \"date\"}}}})";
        assertConflicts(mapping1, mapping2, parser, "cannot update path setting for [_data_stream_timestamp]");
    }

    public void testDifferentTSField() throws IOException {
        String mapping = "{\n"
            + "      \"_data_stream_timestamp\": {\n"
            + "        \"path\": \"event.my_timestamp\"\n"
            + "      },\n"
            + "      \"properties\": {\n"
            + "        \"event\": {\n"
            + "          \"properties\": {\n"
            + "            \"my_timestamp\": {\n"
            + "              \"type\": \"date\""
            + "            }\n"
            + "          }\n"
            + "        }\n"
            + "      }\n"
            + "    }";
        DocumentMapper docMapper = createIndex("test").mapperService()
            .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);

        ParsedDocument doc = docMapper.parse(
            new SourceToParse(
                "test",
                "type",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("event.my_timestamp", "2020-12-12").endObject()),
                XContentType.JSON
            )
        );
        assertThat(doc.rootDoc().getFields("event.my_timestamp").length, equalTo(2));

        Exception e = expectThrows(
            MapperException.class,
            () -> docMapper.parse(
                new SourceToParse(
                    "test",
                    "type",
                    "1",
                    BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("event.timestamp", "2020-12-12").endObject()),
                    XContentType.JSON
                )
            )
        );
        assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [event.my_timestamp] is missing"));
    }

}
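All of the tests above build the same shape of mapping. A standalone sketch that just prints that mapping, using the same XContent API as the tests (only the class name is invented):

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentFactory;

class DataStreamTimestampMappingSketch {
    public static void main(String[] args) throws Exception {
        // Mirrors the mapping used in testPostParse: a _data_stream_timestamp meta
        // field pointing at a date-typed @timestamp property.
        String mapping = Strings.toString(
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type")
                .startObject("_data_stream_timestamp")
                .field("path", "@timestamp")
                .endObject()
                .startObject("properties")
                .startObject("@timestamp")
                .field("type", "date")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
        );
        System.out.println(mapping);
        // {"type":{"_data_stream_timestamp":{"path":"@timestamp"},"properties":{"@timestamp":{"type":"date"}}}}
    }
}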
@ -0,0 +1,87 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.datastreams.mapper;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.datastreams.DataStreamsPlugin;

import java.io.IOException;

import static org.elasticsearch.cluster.DataStreamTestHelper.generateMapping;
import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping;
import static org.hamcrest.Matchers.equalTo;

public class MetadataCreateDataStreamServiceTests extends ESTestCase {

    public void testValidateTimestampFieldMapping() throws Exception {
        String mapping = generateMapping("@timestamp", "date");
        validateTimestampFieldMapping("@timestamp", createMapperService(mapping));
        mapping = generateMapping("@timestamp", "date_nanos");
        validateTimestampFieldMapping("@timestamp", createMapperService(mapping));
    }

    public void testValidateTimestampFieldMappingNoFieldMapping() {
        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> validateTimestampFieldMapping("@timestamp", createMapperService("{}"))
        );
        assertThat(
            e.getMessage(),
            equalTo("[_data_stream_timestamp] meta field doesn't point to data stream timestamp field [@timestamp]")
        );

        String mapping = generateMapping("@timestamp2", "date");
        e = expectThrows(IllegalArgumentException.class, () -> validateTimestampFieldMapping("@timestamp", createMapperService(mapping)));
        assertThat(
            e.getMessage(),
            equalTo("[_data_stream_timestamp] meta field doesn't point to data stream timestamp field [@timestamp]")
        );
    }

    public void testValidateTimestampFieldMappingInvalidFieldType() {
        String mapping = generateMapping("@timestamp", "keyword");
        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> validateTimestampFieldMapping("@timestamp", createMapperService(mapping))
        );
        assertThat(
            e.getMessage(),
            equalTo("the configured timestamp field [@timestamp] is of type [keyword], " + "but [date,date_nanos] is expected")
        );
    }

    MapperService createMapperService(String mapping) throws IOException {
        String indexName = "test";
        IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
            .settings(
                Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
            )
            .putMapping("_doc", mapping)
            .build();
        IndicesModule indicesModule = new IndicesModule(List.of(new DataStreamsPlugin()));
        MapperService mapperService = MapperTestUtils.newMapperService(
            xContentRegistry(),
            createTempDir(),
            Settings.EMPTY,
            indicesModule,
            indexName
        );
        mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
        return mapperService;
    }

}
@ -15,7 +15,6 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.BooleanFieldMapper;
import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
@ -54,7 +53,7 @@ public class ExtractedFieldsDetector {
 */
private static final List<String> IGNORE_FIELDS = Arrays.asList("_id", "_field_names", "_index", "_parent", "_routing", "_seq_no",
    "_source", "_type", "_uid", "_version", "_feature", "_ignored", DestinationIndex.ID_COPY,
    TimestampFieldMapper.NAME);
    "_data_stream_timestamp");

private final String[] index;
private final DataFrameAnalyticsConfig config;
@ -5,17 +5,24 @@
 */
package org.elasticsearch.xpack.restart;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ObjectPath;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@ -39,6 +46,7 @@ import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -47,6 +55,8 @@ import java.util.stream.Collectors;

import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.upgrades.FullClusterRestartIT.assertNumHits;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
@ -862,4 +872,56 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
        assertNoFileBasedRecovery(index, n -> true);
    }
}

@SuppressWarnings("unchecked")
public void testDataStreams() throws Exception {
    assumeTrue("no data streams in versions before " + Version.V_7_9_0, getOldClusterVersion().onOrAfter(Version.V_7_9_0));
    if (isRunningAgainstOldCluster()) {
        String mapping = "{\n" +
            "      \"properties\": {\n" +
            "        \"@timestamp\": {\n" +
            "          \"type\": \"date\"\n" +
            "        }\n" +
            "      }\n" +
            "    }";
        Template template = new Template(null, new CompressedXContent(mapping), null);
        createComposableTemplate(client(), "dst", "ds", template);

        Request indexRequest = new Request("POST", "/ds/_doc/1?op_type=create&refresh");
        XContentBuilder builder = JsonXContent.contentBuilder().startObject()
            .field("f", "v")
            .field("@timestamp", new Date())
            .endObject();
        indexRequest.setJsonEntity(Strings.toString(builder));
        assertOK(client().performRequest(indexRequest));
    }

    Request getDataStream = new Request("GET", "/_data_stream/ds");
    Response response = client().performRequest(getDataStream);
    assertOK(response);
    List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
    assertEquals(1, dataStreams.size());
    Map<String, Object> ds = (Map<String, Object>) dataStreams.get(0);
    List<Map<String, String>> indices = (List<Map<String, String>>) ds.get("indices");
    assertEquals("ds", ds.get("name"));
    assertEquals(1, indices.size());
    assertEquals(DataStream.getDefaultBackingIndexName("ds", 1), indices.get(0).get("index_name"));
    assertNumHits("ds", 1, 1);
}

private static void createComposableTemplate(RestClient client, String templateName, String indexPattern, Template template)
    throws IOException {
    XContentBuilder builder = jsonBuilder();
    template.toXContent(builder, ToXContent.EMPTY_PARAMS);
    StringEntity templateJSON = new StringEntity(
        String.format(Locale.ROOT, "{\n" +
            "  \"index_patterns\": \"%s\",\n" +
            "  \"data_stream\": { \"timestamp_field\": \"@timestamp\" },\n" +
            "  \"template\": %s\n" +
            "}", indexPattern, Strings.toString(builder)),
        ContentType.APPLICATION_JSON);
    Request createIndexTemplateRequest = new Request("PUT", "_index_template/" + templateName);
    createIndexTemplateRequest.setEntity(templateJSON);
    client.performRequest(createIndexTemplateRequest);
}
}
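The same flow the test exercises, condensed into a low-level REST client sketch. This is illustrative, not part of the commit: it assumes an already-configured RestClient, an existing composable template with a data_stream definition matching the name, and an invented class name; the create-data-stream endpoint (PUT /_data_stream/<name>) matches the GET endpoint used above.

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class DataStreamRestSketch {
    // 'client' is an already-configured low-level RestClient.
    static void createAndIndex(RestClient client) throws Exception {
        // Explicitly create the data stream (a matching composable template must exist).
        client.performRequest(new Request("PUT", "/_data_stream/ds"));

        // Documents must be created (op_type=create) and carry the timestamp field.
        Request index = new Request("POST", "/ds/_doc?op_type=create&refresh");
        index.setJsonEntity("{\"@timestamp\":\"2020-12-12\",\"f\":\"v\"}");
        Response response = client.performRequest(index);
        System.out.println(response.getStatusLine());
    }
}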
@ -21,30 +21,30 @@
- match: {indices.2.attributes.0: hidden}
- match: {indices.2.attributes.1: open}
- match: {indices.2.data_stream: simple-data-stream2}
- match: {indices.3.name: my_remote_cluster:ccs_duel_index}
- match: {indices.3.name: my_remote_cluster:.security-7}
- match: {indices.3.attributes.0: open}
- match: {indices.4.name: my_remote_cluster:ccs_duel_index_empty}
- match: {indices.4.attributes.0: open}
- match: {indices.5.name: my_remote_cluster:ccs_duel_index_err}
- match: {indices.4.name: my_remote_cluster:closed_index}
- match: {indices.4.aliases.0: aliased_closed_index}
- match: {indices.4.attributes.0: closed}
- match: {indices.5.name: my_remote_cluster:field_caps_index_1}
- match: {indices.5.attributes.0: open}
- match: {indices.6.name: my_remote_cluster:closed_index}
- match: {indices.6.aliases.0: aliased_closed_index}
- match: {indices.6.attributes.0: closed}
- match: {indices.7.name: my_remote_cluster:field_caps_empty_index}
- match: {indices.6.name: my_remote_cluster:field_caps_index_3}
- match: {indices.6.attributes.0: open}
- match: {indices.7.name: my_remote_cluster:secured_via_alias}
- match: {indices.7.attributes.0: open}
- match: {indices.8.name: my_remote_cluster:field_caps_index_1}
- match: {indices.8.name: my_remote_cluster:single_doc_index}
- match: {indices.8.attributes.0: open}
- match: {indices.9.name: my_remote_cluster:field_caps_index_3}
- match: {indices.9.name: my_remote_cluster:test_index}
- match: {indices.9.aliases.0: aliased_test_index}
- match: {indices.9.attributes.0: open}
- match: {indices.10.name: my_remote_cluster:single_doc_index}
- match: {indices.10.attributes.0: open}
- match: {indices.11.name: my_remote_cluster:test_index}
- match: {indices.11.aliases.0: aliased_test_index}
- match: {indices.11.attributes.0: open}
- match: {aliases.0.name: my_remote_cluster:aliased_closed_index}
- match: {aliases.0.indices.0: closed_index}
- match: {aliases.1.name: my_remote_cluster:aliased_test_index}
- match: {aliases.1.indices.0: test_index}
- match: {aliases.0.name: my_remote_cluster:.security}
- match: {aliases.0.indices.0: .security-7}
- match: {aliases.1.name: my_remote_cluster:aliased_closed_index}
- match: {aliases.1.indices.0: closed_index}
- match: {aliases.2.name: my_remote_cluster:aliased_test_index}
- match: {aliases.2.indices.0: test_index}
- match: {aliases.3.name: my_remote_cluster:secure_alias}
- match: {aliases.3.indices.0: secured_via_alias}
- match: {data_streams.0.name: my_remote_cluster:simple-data-stream1}
- match: {data_streams.0.backing_indices.0: .ds-simple-data-stream1-000001}
- match: {data_streams.0.timestamp_field: "@timestamp"}
@ -53,6 +53,60 @@ setup:
      }
---
"Index data and search on the remote cluster":
  - skip:
      features: allowed_warnings

  - do:
      allowed_warnings:
        - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template1
        body:
          index_patterns: [simple-data-stream1]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'
  - do:
      allowed_warnings:
        - "index template [my-template2] has index patterns [simple-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation"
      indices.put_index_template:
        name: my-template2
        body:
          index_patterns: [simple-data-stream2]
          template:
            mappings:
              properties:
                '@timestamp':
                  type: date
          data_stream:
            timestamp_field: '@timestamp'

  - do:
      indices.create_data_stream:
        name: simple-data-stream1

  - do:
      indices.create_data_stream:
        name: simple-data-stream2

  - do:
      indices.rollover:
        alias: "simple-data-stream2"

  - do:
      indices.create:
        index: closed_index
        body:
          aliases:
            aliased_closed_index: {}

  - do:
      indices.close:
        index: closed_index

  - do:
      indices.create:
@ -5,6 +5,7 @@
 */
package org.elasticsearch.upgrades;

import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.io.Streams;
@ -26,6 +27,9 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase {
private static final String BASIC_AUTH_VALUE =
    basicAuthHeaderValue("test_user", SecuritySettingsSourceField.TEST_PASSWORD);

protected static final Version UPGRADE_FROM_VERSION =
    Version.fromString(System.getProperty("tests.upgrade_from_version"));

@Override
protected boolean preserveIndicesUponCompletion() {
    return true;
@ -51,6 +55,11 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase {
    return true;
}

@Override
protected boolean preserveDataStreamsUponCompletion() {
    return true;
}

enum ClusterType {
    OLD,
    MIXED,
@ -0,0 +1,91 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.upgrades;

import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.Booleans;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import static org.elasticsearch.upgrades.IndexingIT.assertCount;

public class DataStreamsUpgradeIT extends AbstractUpgradeTestCase {

    public void testDataStreams() throws IOException {
        assumeTrue("data streams supported from 7.9.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_7_9_0));
        if (CLUSTER_TYPE == ClusterType.OLD) {
            String requestBody = "{\n" +
                "      \"index_patterns\":[\"logs-*\"],\n" +
                "      \"template\": {\n" +
                "        \"mappings\": {\n" +
                "          \"properties\": {\n" +
                "            \"@timestamp\": {\n" +
                "              \"type\": \"date\"\n" +
                "            }\n" +
                "          }\n" +
                "        }\n" +
                "      },\n" +
                "      \"data_stream\":{\n" +
                "        \"timestamp_field\":\"@timestamp\"" +
                "      }\n" +
                "    }";
            Request request = new Request("PUT", "/_index_template/1");
            request.setJsonEntity(requestBody);
            client().performRequest(request);

            StringBuilder b = new StringBuilder();
            for (int i = 0; i < 1000; i++) {
                b.append("{\"create\":{\"_index\":\"").append("logs-foobar").append("\"}}\n");
                b.append("{\"@timestamp\":\"2020-12-12\",\"test\":\"value").append(i).append("\"}\n");
            }
            Request bulk = new Request("POST", "/_bulk");
            bulk.addParameter("refresh", "true");
            bulk.addParameter("filter_path", "errors");
            bulk.setJsonEntity(b.toString());
            Response response = client().performRequest(bulk);
            assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8));
        } else if (CLUSTER_TYPE == ClusterType.MIXED) {
            Request rolloverRequest = new Request("POST", "/logs-foobar/_rollover");
            client().performRequest(rolloverRequest);

            Request index = new Request("POST", "/logs-foobar/_doc");
            index.addParameter("refresh", "true");
            index.addParameter("filter_path", "_index");
            if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
                index.setJsonEntity("{\"@timestamp\":\"2020-12-12\",\"test\":\"value1000\"}");
                Response response = client().performRequest(index);
                assertEquals("{\"_index\":\".ds-logs-foobar-000002\"}",
                    EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8));
            } else {
                index.setJsonEntity("{\"@timestamp\":\"2020-12-12\",\"test\":\"value1001\"}");
                Response response = client().performRequest(index);
                assertEquals("{\"_index\":\".ds-logs-foobar-000003\"}",
                    EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8));
            }
        }

        final int expectedCount;
        if (CLUSTER_TYPE.equals(ClusterType.OLD)) {
            expectedCount = 1000;
        } else if (CLUSTER_TYPE.equals(ClusterType.MIXED)) {
            if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
                expectedCount = 1001;
            } else {
                expectedCount = 1002;
            }
        } else if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) {
            expectedCount = 1002;
        } else {
            throw new AssertionError("unexpected cluster type");
        }
        assertCount("logs-foobar", expectedCount);
    }

}
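Why the test above can hard-code .ds-logs-foobar-000002 and -000003: backing indices follow a fixed naming scheme, exposed through the same helper already used in FullClusterRestartIT. A tiny sketch (only the class name is invented):

import org.elasticsearch.cluster.metadata.DataStream;

class BackingIndexNameSketch {
    public static void main(String[] args) {
        // Generation 1 is the initial write index; each rollover bumps the generation.
        System.out.println(DataStream.getDefaultBackingIndexName("logs-foobar", 1)); // .ds-logs-foobar-000001
        System.out.println(DataStream.getDefaultBackingIndexName("logs-foobar", 2)); // .ds-logs-foobar-000002
    }
}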
@ -144,7 +144,7 @@ public class IndexingIT extends AbstractUpgradeTestCase {
        client().performRequest(bulk);
    }

    private void assertCount(String index, int count) throws IOException {
    static void assertCount(String index, int count) throws IOException {
        Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
        searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
        searchTestIndexRequest.addParameter("filter_path", "hits.total");