diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java index 8465ae83428..34bcb595c20 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java @@ -19,16 +19,20 @@ package org.elasticsearch.client.dataframe.transforms; +import org.elasticsearch.Version; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; +import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.time.Instant; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -40,6 +44,8 @@ public class DataFrameTransformConfig implements ToXContentObject { public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DEST = new ParseField("dest"); public static final ParseField DESCRIPTION = new ParseField("description"); + public static final ParseField VERSION = new ParseField("version"); + public static final ParseField CREATE_TIME = new ParseField("create_time"); // types of transforms public static final ParseField PIVOT_TRANSFORM = new ParseField("pivot"); @@ -48,6 +54,8 @@ public class DataFrameTransformConfig implements ToXContentObject { private final DestConfig dest; private final PivotConfig pivotConfig; private final String description; + private final Version transformVersion; + private final Instant createTime; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_transform", true, @@ -57,7 +65,9 @@ public class DataFrameTransformConfig implements ToXContentObject { DestConfig dest = (DestConfig) args[2]; PivotConfig pivotConfig = (PivotConfig) args[3]; String description = (String)args[4]; - return new DataFrameTransformConfig(id, source, dest, pivotConfig, description); + Instant createTime = (Instant)args[5]; + String transformVersion = (String)args[6]; + return new DataFrameTransformConfig(id, source, dest, pivotConfig, description, createTime, transformVersion); }); static { @@ -66,6 +76,9 @@ public class DataFrameTransformConfig implements ToXContentObject { PARSER.declareObject(constructorArg(), (p, c) -> DestConfig.PARSER.apply(p, null), DEST); PARSER.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p), PIVOT_TRANSFORM); PARSER.declareString(optionalConstructorArg(), DESCRIPTION); + PARSER.declareField(optionalConstructorArg(), + p -> TimeUtil.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), CREATE_TIME, ObjectParser.ValueType.VALUE); + PARSER.declareString(optionalConstructorArg(), VERSION); } public static DataFrameTransformConfig fromXContent(final XContentParser parser) { @@ -84,19 +97,23 @@ public class DataFrameTransformConfig implements ToXContentObject { * @return A 
DataFrameTransformConfig to preview, NOTE it will have a {@code null} id, destination and index. */ public static DataFrameTransformConfig forPreview(final SourceConfig source, final PivotConfig pivotConfig) { - return new DataFrameTransformConfig(null, source, null, pivotConfig, null); + return new DataFrameTransformConfig(null, source, null, pivotConfig, null, null, null); } DataFrameTransformConfig(final String id, final SourceConfig source, final DestConfig dest, final PivotConfig pivotConfig, - final String description) { + final String description, + final Instant createTime, + final String version) { this.id = id; this.source = source; this.dest = dest; this.pivotConfig = pivotConfig; this.description = description; + this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli()); + this.transformVersion = version == null ? null : Version.fromString(version); } public String getId() { @@ -115,6 +132,14 @@ public class DataFrameTransformConfig implements ToXContentObject { return pivotConfig; } + public Version getVersion() { + return transformVersion; + } + + public Instant getCreateTime() { + return createTime; + } + @Nullable public String getDescription() { return description; @@ -138,6 +163,12 @@ public class DataFrameTransformConfig implements ToXContentObject { if (description != null) { builder.field(DESCRIPTION.getPreferredName(), description); } + if (createTime != null) { + builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + "_string", createTime.toEpochMilli()); + } + if (transformVersion != null) { + builder.field(VERSION.getPreferredName(), transformVersion); + } builder.endObject(); return builder; } @@ -155,15 +186,17 @@ public class DataFrameTransformConfig implements ToXContentObject { final DataFrameTransformConfig that = (DataFrameTransformConfig) other; return Objects.equals(this.id, that.id) - && Objects.equals(this.source, that.source) - && Objects.equals(this.dest, that.dest) - && Objects.equals(this.description, that.description) - && Objects.equals(this.pivotConfig, that.pivotConfig); + && Objects.equals(this.source, that.source) + && Objects.equals(this.dest, that.dest) + && Objects.equals(this.description, that.description) + && Objects.equals(this.transformVersion, that.transformVersion) + && Objects.equals(this.createTime, that.createTime) + && Objects.equals(this.pivotConfig, that.pivotConfig); } @Override public int hashCode() { - return Objects.hash(id, source, dest, pivotConfig, description); + return Objects.hash(id, source, dest, pivotConfig, description, createTime, transformVersion); } @Override @@ -209,7 +242,7 @@ public class DataFrameTransformConfig implements ToXContentObject { } public DataFrameTransformConfig build() { - return new DataFrameTransformConfig(id, source, dest, pivotConfig, description); + return new DataFrameTransformConfig(id, source, dest, pivotConfig, description, null, null); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java new file mode 100644 index 00000000000..2470c3f7a4a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.dataframe.transforms.util; + +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.util.Date; + +public final class TimeUtil { + + /** + * Parse out a Date object given the current parser and field name. + * + * @param parser current XContentParser + * @param fieldName the field's preferred name (utilized in exception) + * @return parsed Date object + * @throws IOException from XContentParser + */ + public static Date parseTimeField(XContentParser parser, String fieldName) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return new Date(parser.longValue()); + } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new Date(DateFormatters.from(DateTimeFormatter.ISO_INSTANT.parse(parser.text())).toInstant().toEpochMilli()); + } + throw new IllegalArgumentException( + "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); + } + + public static Instant parseTimeFieldToInstant(XContentParser parser, String fieldName) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return Instant.ofEpochMilli(parser.longValue()); + } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return DateFormatters.from(DateTimeFormatter.ISO_INSTANT.parse(parser.text())).toInstant(); + } + throw new IllegalArgumentException( + "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 8489d14e101..44af764cc68 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -195,7 +195,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { client::getDataFrameTransformAsync); assertNull(getResponse.getInvalidTransforms()); assertThat(getResponse.getTransformConfigurations(), hasSize(1)); - assertEquals(transform, getResponse.getTransformConfigurations().get(0)); + assertEquals(transform.getId(), getResponse.getTransformConfigurations().get(0).getId()); } public void testGetAllAndPageTransforms() throws IOException { @@ -219,7 +219,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { client::getDataFrameTransformAsync); assertNull(getResponse.getInvalidTransforms()); assertThat(getResponse.getTransformConfigurations(), hasSize(2)); - assertEquals(transform, 
getResponse.getTransformConfigurations().get(1)); + assertEquals(transform.getId(), getResponse.getTransformConfigurations().get(1).getId()); getRequest.setPageParams(new PageParams(0,1)); getResponse = execute(getRequest, client::getDataFrameTransform, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java index 1b5228d9622..84782a8a970 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.dataframe.transforms; +import org.elasticsearch.Version; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -27,6 +28,7 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.time.Instant; import java.util.Collections; import java.util.function.Predicate; @@ -36,8 +38,13 @@ import static org.elasticsearch.client.dataframe.transforms.SourceConfigTests.ra public class DataFrameTransformConfigTests extends AbstractXContentTestCase { public static DataFrameTransformConfig randomDataFrameTransformConfig() { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), - randomDestConfig(), PivotConfigTests.randomPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 100)); + return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), + randomSourceConfig(), + randomDestConfig(), + PivotConfigTests.randomPivotConfig(), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 100), + randomBoolean() ? null : Instant.now(), + randomBoolean() ? 
null : Version.CURRENT.toString()); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 6604e97ed5b..d9ebccfb91f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -478,7 +478,6 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest RestHighLevelClient client = highLevelClient(); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build(); AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); @@ -564,7 +563,6 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest public void testGetDataFrameTransform() throws IOException, InterruptedException { createIndex("source-data"); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build(); AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 32a3b06505b..eb401618f36 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -28,7 +28,7 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-simple-query-string-query.html[Simple Query String] | {query-ref}/SimpleQueryStringBuilder.html[SimpleQueryStringBuilder] | {query-ref}/QueryBuilders.html#simpleQueryStringQuery-java.lang.String-[QueryBuilders.simpleQueryStringQuery()] |====== -==== Term level queries +==== Term-level queries [options="header"] |====== | Search Query | QueryBuilder Class | Method in QueryBuilders diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 10000126ef0..93ce6db6df3 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -100,7 +100,7 @@ PUT _data_frame/transforms/ecommerce_transform } -------------------------------------------------- // CONSOLE -// TEST[setup:kibana_sample_data_ecommerce] +// TEST[skip: https://github.com/elastic/elasticsearch/issues/43271] When the transform is created, you receive the following results: [source,js] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index eef66544d1b..1f3d8f879dc 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -3,6 +3,7 @@ [partintro] -- +TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day trial of Elasticsearch Service] in the cloud. Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time. 
It is generally used as the underlying engine/technology that powers applications that have complex search features and requirements. @@ -118,10 +119,11 @@ NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJD from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the <> -The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] -along with all the releases that have been made in the past. For each release, platform -dependent archive versions are available for Windows, Linux and MacOS, as well as `DEB` and `RPM` -packages for Linux, and `MSI` installation packages for Windows. +The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`]. +Platform dependent archives are available for Windows, Linux and macOS. In addition, +`DEB` and `RPM` packages are available for Linux, and an `MSI` installation package +is available for Windows. You can also use the Elastic Homebrew tap to <> on macOS. [float] === Installation example on Linux diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 235256bdce7..a96c8fe995b 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -26,7 +26,26 @@ index.search.slowlog.threshold.fetch.trace: 200ms index.search.slowlog.level: info -------------------------------------------------- -All of the above settings are _dynamic_ and are set per-index. +All of the above settings are _dynamic_ and can be set for each index using the +<> API. For example: + +[source,js] +-------------------------------------------------- +PUT /twitter/_settings +{ + "index.search.slowlog.threshold.query.warn": "10s", + "index.search.slowlog.threshold.query.info": "5s", + "index.search.slowlog.threshold.query.debug": "2s", + "index.search.slowlog.threshold.query.trace": "500ms", + "index.search.slowlog.threshold.fetch.warn": "1s", + "index.search.slowlog.threshold.fetch.info": "800ms", + "index.search.slowlog.threshold.fetch.debug": "500ms", + "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.level": "info" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] By default, none are enabled (set to `-1`). Levels (`warn`, `info`, `debug`, `trace`) allow to control under which logging level the log @@ -83,7 +102,23 @@ index.indexing.slowlog.level: info index.indexing.slowlog.source: 1000 -------------------------------------------------- -All of the above settings are _dynamic_ and are set per-index. +All of the above settings are _dynamic_ and can be set for each index using the +<> API. For example: + +[source,js] +-------------------------------------------------- +PUT /twitter/_settings +{ + "index.indexing.slowlog.threshold.index.warn": "10s", + "index.indexing.slowlog.threshold.index.info": "5s", + "index.indexing.slowlog.threshold.index.debug": "2s", + "index.indexing.slowlog.threshold.index.trace": "500ms", + "index.indexing.slowlog.level": "info", + "index.indexing.slowlog.source": "1000" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] By default Elasticsearch will log the first 1000 characters of the _source in the slowlog. You can change that with `index.indexing.slowlog.source`. 
Setting diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 0929b36e774..6e03ddd16b7 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -249,7 +249,7 @@ Response: } -------------------------------------------------- // TESTRESPONSE[s/"source" : \{[^}]*\}/"source" : $body.$_path/] -// TESTRESPONSE[s/"details" : \[[^\]]*\]//] +// TESTRESPONSE[s/"details" : \[[^\]]*\]/"details" : $body.$_path/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] //// diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index 55f31262351..be5b2a648c6 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -47,8 +47,7 @@ PUT my_index/_doc/2 <1> The `number_one` field will contain the integer `10`. <2> This document will be rejected because coercion is disabled. -TIP: The `coerce` setting is allowed to have different settings for fields of -the same name in the same index. Its value can be updated on existing fields +TIP: The `coerce` setting value can be updated on existing fields using the <>. [[coerce-setting]] diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 8c91bb48ee7..d84a7290eb7 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -46,8 +46,7 @@ PUT my_index/_doc/2 <1> This document will have the `text` field indexed, but not the `number_one` field. <2> This document will be rejected because `number_two` does not allow malformed values. -TIP: The `ignore_malformed` setting is allowed to have different settings for -fields of the same name in the same index. Its value can be updated on +TIP: The `ignore_malformed` setting value can be updated on existing fields using the <>. diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index ee1bc02c7fd..448f7fd2e81 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -60,8 +60,7 @@ GET my_index/_search NOTE: Multi-fields do not change the original `_source` field. -TIP: The `fields` setting is allowed to have different settings for fields of -the same name in the same index. New multi-fields can be added to existing +TIP: New multi-fields can be added to existing fields using the <>. ==== Multi-fields with multiple analyzers diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index da0298abda2..0f8c09552f4 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -7,7 +7,7 @@ produces a single token. The `normalizer` is applied prior to indexing the keyword, as well as at search-time when the `keyword` field is searched via a query parser such as -the <> query or via a term level query +the <> query or via a term-level query such as the <> query. 
[source,js] diff --git a/docs/reference/mapping/params/norms.asciidoc b/docs/reference/mapping/params/norms.asciidoc index 8a7be4baef8..6a250d296a2 100644 --- a/docs/reference/mapping/params/norms.asciidoc +++ b/docs/reference/mapping/params/norms.asciidoc @@ -11,11 +11,10 @@ don't need scoring on a specific field, you should disable norms on that field. In particular, this is the case for fields that are used solely for filtering or aggregations. -TIP: The `norms` setting must have the same setting for fields of the -same name in the same index. Norms can be disabled on existing fields using +TIP: Norms can be disabled on existing fields using the <>. -Norms can be disabled (but not reenabled) after the fact, using the +Norms can be disabled (but not reenabled after the fact), using the <> like so: [source,js] diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 26f59e1058c..be5308db41a 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -108,6 +108,8 @@ geo-points containing any more than latitude and longitude (two dimensions) valu and reject the whole document. | `true` +|`coerce` |If `true` unclosed linear rings in polygons will be automatically closed. +| `false` |======================================================================= diff --git a/docs/reference/monitoring/configuring-filebeat.asciidoc b/docs/reference/monitoring/configuring-filebeat.asciidoc new file mode 100644 index 00000000000..fd77dc860ce --- /dev/null +++ b/docs/reference/monitoring/configuring-filebeat.asciidoc @@ -0,0 +1,187 @@ +[role="xpack"] +[testenv="basic"] +[[configuring-filebeat]] +=== Collecting {es} log data with {filebeat} + +[subs="attributes"] +++++ +Collecting log data with {filebeat} +++++ + +You can use {filebeat} to monitor the {es} log files, collect log events, and +ship them to the monitoring cluster. Your recent logs are visible on the +*Monitoring* page in {kib}. + +//NOTE: The tagged regions are re-used in the Stack Overview. + +. Verify that {es} is running and that the monitoring cluster is ready to +receive data from {filebeat}. ++ +-- +TIP: In production environments, we strongly recommend using a separate cluster +(referred to as the _monitoring cluster_) to store the data. Using a separate +monitoring cluster prevents production cluster outages from impacting your +ability to access your monitoring data. It also prevents monitoring activities +from impacting the performance of your production cluster. See +{stack-ov}/monitoring-production.html[Monitoring in a production environment]. + +-- + +. Enable the collection of monitoring data on your cluster. ++ +-- +include::configuring-metricbeat.asciidoc[tag=enable-collection] + +For more information, see <> and <>. +-- + +. Identify which logs you want to monitor. ++ +-- +The {filebeat} {es} module can handle +{stack-ov}/audit-log-output.html[audit logs], +{ref}/logging.html#deprecation-logging[deprecation logs], +{ref}/gc-logging.html[gc logs], {ref}/logging.html[server logs], and +{ref}/index-modules-slowlog.html[slow logs]. +For more information about the location of your {es} logs, see the +{ref}/path-settings.html[path.logs] setting. + +IMPORTANT: If there are both structured (`*.json`) and unstructured (plain text) +versions of the logs, you must use the structured logs. Otherwise, they might +not appear in the appropriate context in {kib}. + +-- + +. 
{filebeat-ref}/filebeat-installation.html[Install {filebeat}] on the {es} +nodes that contain logs that you want to monitor. + +. Identify where to send the log data. ++ +-- +// tag::output-elasticsearch[] +For example, specify {es} output information for your monitoring cluster in +the {filebeat} configuration file (`filebeat.yml`): + +[source,yaml] +---------------------------------- +output.elasticsearch: + # Array of hosts to connect to. + hosts: ["http://es-mon-1:9200", "http://es-mon-2:9200"] <1> + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" +---------------------------------- +<1> In this example, the data is stored on a monitoring cluster with nodes +`es-mon-1` and `es-mon-2`. + +If you configured the monitoring cluster to use encrypted communications, you +must access it via HTTPS. For example, use a `hosts` setting like +`https://es-mon-1:9200`. + +IMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the +cluster that stores the monitoring data must have at least one +<>. + +If {es} {security-features} are enabled on the monitoring cluster, you must +provide a valid user ID and password so that {filebeat} can send log data +successfully. + +For more information about these configuration options, see +{filebeat-ref}/elasticsearch-output.html[Configure the {es} output]. +// end::output-elasticsearch[] +-- + +. Optional: Identify where to visualize the data. ++ +-- +// tag::setup-kibana[] +{filebeat} provides example {kib} dashboards, visualizations and searches. To +load the dashboards into the appropriate {kib} instance, specify the +`setup.kibana` information in the {filebeat} configuration file +(`filebeat.yml`) on each node: + +[source,yaml] +---------------------------------- +setup.kibana: + host: "localhost:5601" + #username: "my_kibana_user" + #password: "YOUR_PASSWORD" +---------------------------------- + +TIP: In production environments, we strongly recommend using a dedicated {kib} +instance for your monitoring cluster. + +If {security-features} are enabled, you must provide a valid user ID and +password so that {filebeat} can connect to {kib}: + +.. Create a user on the monitoring cluster that has the +{stack-ov}/built-in-roles.html[`kibana_user` built-in role] or equivalent +privileges. + +.. Add the `username` and `password` settings to the {es} output information in +the {filebeat} configuration file. The example shows a hard-coded password, but +you should store sensitive values in the +{filebeat-ref}/keystore.html[secrets keystore]. + +See {filebeat-ref}/setup-kibana-endpoint.html[Configure the {kib} endpoint]. + +// end::setup-kibana[] +-- + +. Enable the {es} module and set up the initial {filebeat} environment on each +node. ++ +-- +// tag::enable-es-module[] +For example: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +filebeat modules enable elasticsearch +filebeat setup -e +---------------------------------------------------------------------- + +For more information, see +{filebeat-ref}/filebeat-module-elasticsearch.html[{es} module]. + +// end::enable-es-module[] +-- + +. Configure the {es} module in {filebeat} on each node. ++ +-- +// tag::configure-es-module[] +If the logs that you want to monitor aren't in the default location, set the +appropriate path variables in the `modules.d/elasticsearch.yml` file. 
See +{filebeat-ref}/filebeat-module-elasticsearch.html#configuring-elasticsearch-module[Configure the {es} module]. + +IMPORTANT: If there are JSON logs, configure the `var.paths` settings to point +to them instead of the plain text logs. + +// end::configure-es-module[] +-- + +. {filebeat-ref}/filebeat-starting.html[Start {filebeat}] on each node. ++ +-- +NOTE: Depending on how you’ve installed {filebeat}, you might see errors related +to file ownership or permissions when you try to run {filebeat} modules. See +{beats-ref}/config-file-permissions.html[Config file ownership and permissions]. + +-- + +. Check whether the appropriate indices exist on the monitoring cluster. ++ +-- +For example, use the {ref}/cat-indices.html[cat indices] command to verify +that there are new `filebeat-*` indices. + +TIP: If you want to use the *Monitoring* UI in {kib}, there must also be +`.monitoring-*` indices. Those indices are generated when you collect metrics +about {stack} products. For example, see <>. + +-- + +. {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. diff --git a/docs/reference/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc index 9fe5b71d7a1..e129999e3a5 100644 --- a/docs/reference/monitoring/configuring-monitoring.asciidoc +++ b/docs/reference/monitoring/configuring-monitoring.asciidoc @@ -12,9 +12,12 @@ methods to collect metrics about {es}: * <> * <> +You can also <>. + To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. include::collecting-monitoring-data.asciidoc[] include::configuring-metricbeat.asciidoc[] +include::configuring-filebeat.asciidoc[] include::indices.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/constant-score-query.asciidoc b/docs/reference/query-dsl/constant-score-query.asciidoc index aa7ee60aa5c..bfcece8d62e 100644 --- a/docs/reference/query-dsl/constant-score-query.asciidoc +++ b/docs/reference/query-dsl/constant-score-query.asciidoc @@ -1,12 +1,12 @@ [[query-dsl-constant-score-query]] === Constant Score Query -A query that wraps another query and simply returns a -constant score equal to the query boost for every document in the -filter. Maps to Lucene `ConstantScoreQuery`. +Wraps a <> and returns every matching +document with a <> equal to the `boost` +parameter value. [source,js] --------------------------------------------------- +---- GET /_search { "query": { @@ -18,8 +18,22 @@ GET /_search } } } --------------------------------------------------- +---- // CONSOLE -Filter clauses are executed in <>, -meaning that scoring is ignored and clauses are considered for caching. +[[constant-score-top-level-params]] +==== Top-level parameters for `constant_score` +`filter`:: ++ +-- +<> you wish to run. Any returned documents +must match this query. Required. + +Filter queries do not calculate <>. To +speed up performance, {es} automatically caches frequently used filter queries. +-- + +`boost`:: +Floating point number used as the constant <> for every document matching the `filter` query. Default is `1.0`. +Optional. 
\ No newline at end of file diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 487e944c09e..1a088a35014 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -39,7 +39,7 @@ Then the following simple query can be executed with a [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -94,7 +94,7 @@ representations of the geo point, the filter can accept it as well: [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -129,7 +129,7 @@ conform with http://geojson.org/[GeoJSON]. [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -157,7 +157,7 @@ Format in `lat,lon`. [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -183,7 +183,7 @@ GET /_search [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -208,7 +208,7 @@ GET /_search [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -243,7 +243,7 @@ geohash the geohash can be specified in both `top_left` and [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "geo_bounding_box" : { @@ -273,7 +273,7 @@ values separately. [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { @@ -323,7 +323,7 @@ are not supported. Here is an example: [source,js] -------------------------------------------------- -GET /_search +GET my_locations/_search { "query": { "bool" : { diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index f5788783f7e..3049cb36317 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -40,7 +40,6 @@ POST _search } ] }, - "boost" : 2.0, "_name" : "favourite_food" } } @@ -298,4 +297,4 @@ POST _search } } -------------------------------------------------- -// CONSOLE \ No newline at end of file +// CONSOLE diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index f4e185ba959..badfb85ac62 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -1,72 +1,63 @@ [[term-level-queries]] -== Term level queries +== Term-level queries -While the <> will analyze the query -string before executing, the _term-level queries_ operate on the exact terms -that are stored in the inverted index, and will normalize terms before executing -only for <> fields with <> property. +You can use **term-level queries** to find documents based on precise values in +structured data. Examples of structured data include date ranges, IP addresses, +prices, or product IDs. -These queries are usually used for structured data like numbers, dates, and -enums, rather than full text fields. Alternatively, they allow you to craft -low-level queries, foregoing the analysis process. +Unlike <>, term-level queries do not +analyze search terms. 
Instead, term-level queries match the exact terms stored +in a field. -The queries in this group are: + +[NOTE] +==== +Term-level queries still normalize search terms for `keyword` fields with the +`normalizer` property. For more details, see <>. +==== + +[float] +[[term-level-query-types]] +=== Types of term-level queries <>:: - - Find documents which contain the exact term specified in the field - specified. +Returns documents that contain an exact term in a provided field. <>:: - - Find documents which contain any of the exact terms specified in the field - specified. +Returns documents that contain one or more exact terms in a provided field. <>:: - - Find documents which match with one or more of the specified terms. The - number of terms that must match depend on the specified minimum should - match field or script. +Returns documents that contain a minimum number of exact terms in a provided +field. You can define the minimum number of matching terms using a field or +script. <>:: - - Find documents where the field specified contains values (dates, numbers, - or strings) in the range specified. +Returns documents that contain terms within a provided range. <>:: - - Find documents where the field specified contains any non-null value. +Returns documents that contain any indexed value for a field. <>:: - - Find documents where the field specified contains terms which begin with - the exact prefix specified. +Returns documents that contain a specific prefix in a provided field. <>:: - - Find documents where the field specified contains terms which match the - pattern specified, where the pattern supports single character wildcards - (`?`) and multi-character wildcards (`*`) +Returns documents that contain terms matching a wildcard pattern. <>:: - - Find documents where the field specified contains terms which match the - <> specified. +Returns documents that contain terms matching a +https://en.wikipedia.org/wiki/Regular_expression[regular expression]. <>:: - - Find documents where the field specified contains terms which are fuzzily - similar to the specified term. Fuzziness is measured as a - http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] - of 1 or 2. +Returns documents that contain terms similar to the search term. {es} measures +similarity, or fuzziness, using a +http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein edit distance]. <>:: - Find documents of the specified type. +Returns documents of the specified type. <>:: - - Find documents with the specified type and IDs. +Returns documents based on their <>. include::term-query.asciidoc[] diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index bfbfbc54179..2d0e3e218e6 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -224,7 +224,7 @@ The response contains suggestions scored by the most likely spell correction fir [source,js] -------------------------------------------------- -POST _search +POST test/_search { "suggest": { "text" : "noble prize", @@ -293,7 +293,7 @@ properties that can be configured. [source,js] -------------------------------------------------- -POST _search +POST test/_search { "suggest": { "text" : "obel prize", @@ -414,7 +414,7 @@ accept ordinary analyzer names. 
[source,js] -------------------------------------------------- -POST _search +POST test/_search { "suggest": { "text" : "obel prize", diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 800cecb4065..7e03ad3947b 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -59,6 +59,13 @@ downloaded from the Elastic Docker Registry. + {ref}/docker.html[Install {es} with Docker] +`brew`:: + +Formulae are available from the Elastic Homebrew tap for installing +{es} on macOS with the Homebrew package manager. ++ +{ref}/brew.html[Install {es} on macOS with Homebrew] + [float] [[config-mgmt-tools]] === Configuration Management Tools @@ -84,3 +91,4 @@ include::install/windows.asciidoc[] include::install/docker.asciidoc[] +include::install/brew.asciidoc[] diff --git a/docs/reference/setup/install/brew.asciidoc b/docs/reference/setup/install/brew.asciidoc new file mode 100644 index 00000000000..cfc9e4c3e05 --- /dev/null +++ b/docs/reference/setup/install/brew.asciidoc @@ -0,0 +1,69 @@ +[[brew]] +=== Install {es} on macOS with Homebrew + +Elastic publishes Homebrew formulae so you can install {es} with the +https://brew.sh/[Homebrew] package manager. + +To install with Homebrew, you first need to tap the +Elastic Homebrew repository: + +[source,sh] +------------------------- +brew tap elastic/tap +------------------------- + +Once you've tapped the Elastic Homebrew repo, you can use `brew install` to +install the default distribution of {es}: + +[source,sh] +------------------------- +brew install elastic/tap/elasticsearch-full +------------------------- + +This installs the most recently released default distribution of {es}. +To install the OSS distribution, specify `elastic/tap/elasticsearch-oss`. + +[[brew-layout]] +==== Directory layout for Homebrew installs + +When you install {es} with `brew install`, the config files, logs, +and data directory are stored in the following locations. + +[cols="<h,<,<m,<m",options="header"] +|======================================================================= +| Type | Description | Default Location | Setting +| home + | Elasticsearch home directory or `$ES_HOME` + | /usr/local/var/homebrew/linked/elasticsearch-full + | + +| bin + | Binary scripts including `elasticsearch` to start a node + and `elasticsearch-plugin` to install plugins + | /usr/local/var/homebrew/linked/elasticsearch-full/bin + | + +| conf + | Configuration files including `elasticsearch.yml` + | /usr/local/etc/elasticsearch + | <<config-files-location,ES_PATH_CONF>> + +| data + | The location of the data files of each index / shard allocated + on the node. Can hold multiple locations. + | /usr/local/var/lib/elasticsearch + | path.data + +| logs + | Log files location. + | /usr/local/var/log/elasticsearch + | path.logs + +| plugins + | Plugin files location. Each plugin will be contained in a subdirectory. + | /usr/local/var/homebrew/linked/elasticsearch/plugins + | + +|======================================================================= + +include::next-steps.asciidoc[] diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 4e3799db75b..82b61848a84 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -7,8 +7,11 @@ keystore and the `elasticsearch-keystore` tool to manage the settings in the key NOTE: All commands here should be run as the user which will run Elasticsearch. -NOTE: Only some settings are designed to be read from the keystore. See -documentation for each setting to see if it is supported as part of the keystore. +IMPORTANT: Only some settings are designed to be read from the keystore. However, +the keystore has no validation to block unsupported settings. +Adding unsupported settings to the keystore will cause {es} +to fail to start. See documentation for each setting to see if it is supported +as part of the keystore. 
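To make the admonition above concrete, here is a minimal shell sketch of the intended workflow. The `create`, `add`, `list`, and `remove` subcommands of `elasticsearch-keystore` exist as shown; `demo.secure.setting` is a placeholder name used purely for illustration, not a real supported setting.

[source,sh]
----
# Create the keystore (stored next to elasticsearch.yml) if it does not exist yet.
bin/elasticsearch-keystore create

# Add a setting only if its documentation says it can be read from the keystore.
bin/elasticsearch-keystore add demo.secure.setting

# Review what the keystore contains before restarting the node.
bin/elasticsearch-keystore list

# If an unsupported setting was added and the node refuses to start, remove it.
bin/elasticsearch-keystore remove demo.secure.setting
----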
NOTE: All the modifications to the keystore take affect only after restarting Elasticsearch. diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index fa6ffdd0407..34598a77df1 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -289,7 +289,7 @@ public abstract class AbstractXContentParser implements XContentParser { return readListOrderedMap(this); } - interface MapFactory { + public interface MapFactory { Map newMap(); } @@ -391,7 +391,7 @@ public abstract class AbstractXContentParser implements XContentParser { return list; } - static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException { + public static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException { if (token == XContentParser.Token.VALUE_NULL) { return null; } else if (token == XContentParser.Token.VALUE_STRING) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java deleted file mode 100644 index c1ba6bfbe1c..00000000000 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.painless; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.painless.lookup.PainlessClass; -import org.elasticsearch.painless.lookup.PainlessConstructor; -import org.elasticsearch.painless.lookup.PainlessField; -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.lookup.PainlessMethod; -import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.spi.Whitelist; - -import java.io.IOException; -import java.io.PrintStream; -import java.lang.reflect.Modifier; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.function.Consumer; -import java.util.stream.Collectors; - -import static java.util.Comparator.comparing; - -/** - * Generates an API reference from the method and type whitelists in {@link PainlessLookup}. - */ -public class PainlessDocGenerator { - - private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); - private static final Logger logger = LogManager.getLogger(PainlessDocGenerator.class); - private static final Comparator FIELD_NAME = comparing(f -> f.javaField.getName()); - private static final Comparator METHOD_NAME = comparing(m -> m.javaMethod.getName()); - private static final Comparator METHOD_NUMBER_OF_PARAMS = comparing(m -> m.typeParameters.size()); - private static final Comparator CONSTRUCTOR_NUMBER_OF_PARAMS = comparing(m -> m.typeParameters.size()); - - public static void main(String[] args) throws IOException { - Path apiRootPath = PathUtils.get(args[0]); - - // Blow away the last execution and recreate it from scratch - IOUtils.rm(apiRootPath); - Files.createDirectories(apiRootPath); - - Path indexPath = apiRootPath.resolve("index.asciidoc"); - logger.info("Starting to write [index.asciidoc]"); - try (PrintStream indexStream = new PrintStream( - Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), - false, StandardCharsets.UTF_8.name())) { - emitGeneratedWarning(indexStream); - List> classes = PAINLESS_LOOKUP.getClasses().stream().sorted( - Comparator.comparing(Class::getCanonicalName)).collect(Collectors.toList()); - for (Class clazz : classes) { - PainlessClass struct = PAINLESS_LOOKUP.lookupPainlessClass(clazz); - String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(clazz); - - if (clazz.isPrimitive()) { - // Primitives don't have methods to reference - continue; - } - if (clazz == def.class) { - // def is special but doesn't have any methods all of its own. 
- continue; - } - indexStream.print("include::"); - indexStream.print(canonicalClassName); - indexStream.println(".asciidoc[]"); - - Path typePath = apiRootPath.resolve(canonicalClassName + ".asciidoc"); - logger.info("Writing [{}.asciidoc]", canonicalClassName); - try (PrintStream typeStream = new PrintStream( - Files.newOutputStream(typePath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), - false, StandardCharsets.UTF_8.name())) { - emitGeneratedWarning(typeStream); - typeStream.print("[["); - emitAnchor(typeStream, clazz); - typeStream.print("]]++"); - typeStream.print(canonicalClassName); - typeStream.println("++::"); - - Consumer documentField = field -> PainlessDocGenerator.documentField(typeStream, field); - Consumer documentMethod = method -> PainlessDocGenerator.documentMethod(typeStream, method); - Consumer documentConstructor = - constructor -> PainlessDocGenerator.documentConstructor(typeStream, constructor); - struct.staticFields.values().stream().sorted(FIELD_NAME).forEach(documentField); - struct.fields.values().stream().sorted(FIELD_NAME).forEach(documentField); - struct.staticMethods.values().stream().sorted( - METHOD_NAME.thenComparing(METHOD_NUMBER_OF_PARAMS)).forEach(documentMethod); - struct.constructors.values().stream().sorted(CONSTRUCTOR_NUMBER_OF_PARAMS).forEach(documentConstructor); - Map> inherited = new TreeMap<>(); - struct.methods.values().stream().sorted(METHOD_NAME.thenComparing(METHOD_NUMBER_OF_PARAMS)).forEach(method -> { - if (method.targetClass == clazz) { - documentMethod(typeStream, method); - } else { - inherited.put(canonicalClassName, method.targetClass); - } - }); - - if (false == inherited.isEmpty()) { - typeStream.print("* Inherits methods from "); - boolean first = true; - for (Class inheritsFrom : inherited.values()) { - if (first) { - first = false; - } else { - typeStream.print(", "); - } - typeStream.print("++"); - emitStruct(typeStream, inheritsFrom); - typeStream.print("++"); - } - typeStream.println(); - } - } - } - } - logger.info("Done writing [index.asciidoc]"); - } - - private static void documentField(PrintStream stream, PainlessField field) { - stream.print("** [["); - emitAnchor(stream, field); - stream.print("]]"); - - if (Modifier.isStatic(field.javaField.getModifiers())) { - stream.print("static "); - } - - emitType(stream, field.typeParameter); - stream.print(' '); - - String javadocRoot = javadocRoot(field); - emitJavadocLink(stream, javadocRoot, field); - stream.print('['); - stream.print(field.javaField.getName()); - stream.print(']'); - - if (javadocRoot.equals("java8")) { - stream.print(" ("); - emitJavadocLink(stream, "java9", field); - stream.print("[java 9])"); - } - - stream.println(); - } - - /** - * Document a constructor. 
- */ - private static void documentConstructor(PrintStream stream, PainlessConstructor constructor) { - stream.print("* ++[["); - emitAnchor(stream, constructor); - stream.print("]]"); - - String javadocRoot = javadocRoot(constructor.javaConstructor.getDeclaringClass()); - emitJavadocLink(stream, javadocRoot, constructor); - stream.print('['); - - stream.print(constructorName(constructor)); - - stream.print("]("); - boolean first = true; - for (Class arg : constructor.typeParameters) { - if (first) { - first = false; - } else { - stream.print(", "); - } - emitType(stream, arg); - } - stream.print(")++"); - - if (javadocRoot.equals("java8")) { - stream.print(" ("); - emitJavadocLink(stream, "java9", constructor); - stream.print("[java 9])"); - } - - stream.println(); - } - - /** - * Document a method. - */ - private static void documentMethod(PrintStream stream, PainlessMethod method) { - stream.print("* ++[["); - emitAnchor(stream, method); - stream.print("]]"); - - if (method.targetClass == method.javaMethod.getDeclaringClass() && Modifier.isStatic(method.javaMethod.getModifiers())) { - stream.print("static "); - } - - emitType(stream, method.returnType); - stream.print(' '); - - String javadocRoot = javadocRoot(method); - emitJavadocLink(stream, javadocRoot, method); - stream.print('['); - - stream.print(methodName(method)); - - stream.print("]("); - boolean first = true; - for (Class arg : method.typeParameters) { - if (first) { - first = false; - } else { - stream.print(", "); - } - emitType(stream, arg); - } - stream.print(")++"); - - if (javadocRoot.equals("java8")) { - stream.print(" ("); - emitJavadocLink(stream, "java9", method); - stream.print("[java 9])"); - } - - stream.println(); - } - - /** - * Anchor text for a {@link PainlessClass}. - */ - private static void emitAnchor(PrintStream stream, Class clazz) { - stream.print("painless-api-reference-"); - stream.print(PainlessLookupUtility.typeToCanonicalTypeName(clazz).replace('.', '-')); - } - - /** - * Anchor text for a {@link PainlessConstructor}. - */ - private static void emitAnchor(PrintStream stream, PainlessConstructor constructor) { - emitAnchor(stream, constructor.javaConstructor.getDeclaringClass()); - stream.print('-'); - stream.print(constructorName(constructor)); - stream.print('-'); - stream.print(constructor.typeParameters.size()); - } - - /** - * Anchor text for a {@link PainlessMethod}. - */ - private static void emitAnchor(PrintStream stream, PainlessMethod method) { - emitAnchor(stream, method.targetClass); - stream.print('-'); - stream.print(methodName(method)); - stream.print('-'); - stream.print(method.typeParameters.size()); - } - - /** - * Anchor text for a {@link PainlessField}. - */ - private static void emitAnchor(PrintStream stream, PainlessField field) { - emitAnchor(stream, field.javaField.getDeclaringClass()); - stream.print('-'); - stream.print(field.javaField.getName()); - } - - private static String constructorName(PainlessConstructor constructor) { - return PainlessLookupUtility.typeToCanonicalTypeName(constructor.javaConstructor.getDeclaringClass()); - } - - private static String methodName(PainlessMethod method) { - return PainlessLookupUtility.typeToCanonicalTypeName(method.targetClass); - } - - /** - * Emit a {@link Class}. If the type is primitive or an array of primitives this just emits the name of the type. Otherwise this emits - an internal link with the text. 
- */ - private static void emitType(PrintStream stream, Class clazz) { - emitStruct(stream, clazz); - while ((clazz = clazz.getComponentType()) != null) { - stream.print("[]"); - } - } - - /** - * Emit a {@link PainlessClass}. If the {@linkplain PainlessClass} is primitive or def this just emits the name of the struct. - * Otherwise this emits an internal link with the name. - */ - private static void emitStruct(PrintStream stream, Class clazz) { - String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(clazz); - - if (false == clazz.isPrimitive() && clazz != def.class) { - stream.print("<<"); - emitAnchor(stream, clazz); - stream.print(','); - stream.print(canonicalClassName); - stream.print(">>"); - } else { - stream.print(canonicalClassName); - } - } - - /** - * Emit an external link to Javadoc for a {@link PainlessMethod}. - * - * @param root name of the root uri variable - */ - private static void emitJavadocLink(PrintStream stream, String root, PainlessConstructor constructor) { - stream.print("link:{"); - stream.print(root); - stream.print("-javadoc}/"); - stream.print(classUrlPath(constructor.javaConstructor.getDeclaringClass())); - stream.print(".html#"); - stream.print(constructorName(constructor)); - stream.print("%2D"); - boolean first = true; - for (Class clazz: constructor.typeParameters) { - if (first) { - first = false; - } else { - stream.print("%2D"); - } - stream.print(clazz.getName()); - if (clazz.isArray()) { - stream.print(":A"); - } - } - stream.print("%2D"); - } - - /** - * Emit an external link to Javadoc for a {@link PainlessMethod}. - * - * @param root name of the root uri variable - */ - private static void emitJavadocLink(PrintStream stream, String root, PainlessMethod method) { - stream.print("link:{"); - stream.print(root); - stream.print("-javadoc}/"); - stream.print(classUrlPath(method.javaMethod.getDeclaringClass())); - stream.print(".html#"); - stream.print(methodName(method)); - stream.print("%2D"); - boolean first = true; - if (method.targetClass != method.javaMethod.getDeclaringClass()) { - first = false; - stream.print(method.javaMethod.getDeclaringClass().getName()); - } - for (Class clazz: method.typeParameters) { - if (first) { - first = false; - } else { - stream.print("%2D"); - } - stream.print(clazz.getName()); - if (clazz.isArray()) { - stream.print(":A"); - } - } - stream.print("%2D"); - } - - /** - * Emit an external link to Javadoc for a {@link PainlessField}. - * - * @param root name of the root uri variable - */ - private static void emitJavadocLink(PrintStream stream, String root, PainlessField field) { - stream.print("link:{"); - stream.print(root); - stream.print("-javadoc}/"); - stream.print(classUrlPath(field.javaField.getDeclaringClass())); - stream.print(".html#"); - stream.print(field.javaField.getName()); - } - - /** - * Pick the javadoc root for a {@link PainlessMethod}. - */ - private static String javadocRoot(PainlessMethod method) { - if (method.targetClass != method.javaMethod.getDeclaringClass()) { - return "painless"; - } - return javadocRoot(method.targetClass); - } - - /** - * Pick the javadoc root for a {@link PainlessField}. - */ - private static String javadocRoot(PainlessField field) { - return javadocRoot(field.javaField.getDeclaringClass()); - } - - /** - * Pick the javadoc root for a {@link Class}. 
- */ - private static String javadocRoot(Class clazz) { - String classPackage = clazz.getPackage().getName(); - if (classPackage.startsWith("java")) { - return "java8"; - } - if (classPackage.startsWith("org.elasticsearch.painless")) { - return "painless"; - } - if (classPackage.startsWith("org.elasticsearch")) { - return "elasticsearch"; - } - if (classPackage.startsWith("org.joda.time")) { - return "joda-time"; - } - if (classPackage.startsWith("org.apache.lucene")) { - return "lucene-core"; - } - throw new IllegalArgumentException("Unrecognized package: " + classPackage); - } - - private static void emitGeneratedWarning(PrintStream stream) { - stream.println("////"); - stream.println("Automatically generated by PainlessDocGenerator. Do not edit."); - stream.println("Rebuild by running `gradle generatePainlessApi`."); - stream.println("////"); - stream.println(); - } - - private static String classUrlPath(Class clazz) { - return clazz.getName().replace('.', '/').replace('$', '.'); - } -} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 410a92ab198..e534717e775 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -19,89 +19,21 @@ package org.elasticsearch.index.reindex; -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser.ValueType; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.VersionType; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.script.Script; import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.List; -import java.util.Map; -import static java.util.Collections.emptyMap; -import static java.util.Objects.requireNonNull; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.rest.RestRequest.Method.POST; /** * Expose reindex over rest. 
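The static source parser deleted just below relied on a deliberate trick: parse the body into a plain map, pull the reindex-specific keys (index, type, remote) out of it, and re-serialize whatever remains as the search body. A minimal sketch of that extract-and-reparse pattern using only java.util; ExtractSketch is hypothetical, and its String-or-List handling mirrors the removed extractStringArray helper:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Minimal sketch of the extract-and-reparse pattern used by the removed
// source parser: known keys are pulled out of the already-parsed map and
// whatever is left over is handed on as the search body.
public class ExtractSketch {
    // Emulates the permissive String-or-List handling of extractStringArray.
    @SuppressWarnings("unchecked")
    static String[] extractStringArray(Map<String, Object> source, String name) {
        Object value = source.remove(name);
        if (value == null) {
            return null;
        }
        if (value instanceof List) {
            List<String> list = (List<String>) value;
            return list.toArray(new String[0]);
        }
        if (value instanceof String) {
            return new String[] { (String) value };
        }
        throw new IllegalArgumentException("Expected [" + name + "] to be a list or a string but was [" + value + "]");
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("index", "source-*");
        source.put("query", Map.of("match_all", Map.of()));

        String[] indices = extractStringArray(source, "index"); // ["source-*"]
        // What is left in `source` (here just "query") would be re-serialized
        // and parsed as the search request body.
        System.out.println(indices[0] + " remaining=" + source.keySet());
    }
}
```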
*/ public class RestReindexAction extends AbstractBaseReindexRestHandler { - static final ObjectParser PARSER = new ObjectParser<>("reindex"); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in reindex requests is deprecated."; - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestReindexAction.class)); - - static { - ObjectParser.Parser sourceParser = (parser, request, context) -> { - // Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote. - Map source = parser.map(); - String[] indices = extractStringArray(source, "index"); - if (indices != null) { - request.getSearchRequest().indices(indices); - } - String[] types = extractStringArray(source, "type"); - if (types != null) { - deprecationLogger.deprecatedAndMaybeLog("reindex_with_types", TYPES_DEPRECATION_MESSAGE); - request.getSearchRequest().types(types); - } - request.setRemoteInfo(buildRemoteInfo(source)); - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); - builder.map(source); - try (InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser innerParser = parser.contentType().xContent() - .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), stream)) { - request.getSearchRequest().source().parseXContent(innerParser, false); - } - }; - - ObjectParser destParser = new ObjectParser<>("dest"); - destParser.declareString(IndexRequest::index, new ParseField("index")); - destParser.declareString((request, type) -> { - deprecationLogger.deprecatedAndMaybeLog("reindex_with_types", TYPES_DEPRECATION_MESSAGE); - request.type(type); - }, new ParseField("type")); - destParser.declareString(IndexRequest::routing, new ParseField("routing")); - destParser.declareString(IndexRequest::opType, new ParseField("op_type")); - destParser.declareString(IndexRequest::setPipeline, new ParseField("pipeline")); - destParser.declareString((s, i) -> s.versionType(VersionType.fromString(i)), new ParseField("version_type")); - - PARSER.declareField(sourceParser::parse, new ParseField("source"), ValueType.OBJECT); - PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), c), new ParseField("dest"), ValueType.OBJECT); - PARSER.declareInt(RestReindexAction::setMaxDocsValidateIdentical, new ParseField("max_docs", "size")); - PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p)), new ParseField("script"), - ValueType.OBJECT); - PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts")); - } public RestReindexAction(Settings settings, RestController controller) { super(settings, ReindexAction.INSTANCE); @@ -124,123 +56,15 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler source) throws IOException { - @SuppressWarnings("unchecked") - Map remote = (Map) source.remove("remote"); - if (remote == null) { - return null; - } - String username = extractString(remote, "username"); - String password = extractString(remote, "password"); - String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be specified to reindex from a remote cluster"); - URI uri; - try { - uri = new URI(hostInRequest); - // URI has less stringent URL parsing than our code. We want to fail if all values are not provided. 
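The port check that follows is the core of that validation, since java.net.URI itself happily parses host strings that omit the scheme or the port. A standalone sketch of the same behavior; RemoteHostSketch is a hypothetical stand-in for the RemoteInfo fields the removed code populated:

```java
import java.net.URI;
import java.net.URISyntaxException;

// Standalone sketch of the host validation the removed buildRemoteInfo
// performed: scheme, host and an explicit port are all required, and an
// optional trailing path becomes the pathPrefix.
public class RemoteHostSketch {
    final String scheme;
    final String host;
    final int port;
    final String pathPrefix; // null when the URI has no path

    RemoteHostSketch(String hostInRequest) {
        URI uri;
        try {
            uri = new URI(hostInRequest);
            if (uri.getPort() == -1) {
                // URI accepts "example.com" or "http://example.com"; the
                // explicit-port requirement is what rejects those forms.
                throw new URISyntaxException(hostInRequest, "The port was not defined in the [host]");
            }
        } catch (URISyntaxException ex) {
            throw new IllegalArgumentException(
                "[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [" + hostInRequest + "]", ex);
        }
        this.scheme = uri.getScheme();
        this.host = uri.getHost();
        this.port = uri.getPort();
        this.pathPrefix = uri.getPath().isEmpty() ? null : uri.getPath();
    }

    public static void main(String[] args) {
        RemoteHostSketch ok = new RemoteHostSketch("https://other.example.com:9201/proxy-path/");
        // Prints: https other.example.com 9201 /proxy-path/
        System.out.println(ok.scheme + " " + ok.host + " " + ok.port + " " + ok.pathPrefix);
        try {
            new RemoteHostSketch("http://example.com"); // no explicit port
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```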
- if (uri.getPort() == -1) { - throw new URISyntaxException(hostInRequest, "The port was not defined in the [host]"); - } - } catch (URISyntaxException ex) { - throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [" - + hostInRequest + "]", ex); - } - - String scheme = uri.getScheme(); - String host = uri.getHost(); - int port = uri.getPort(); - - String pathPrefix = null; - if (uri.getPath().isEmpty() == false) { - pathPrefix = uri.getPath(); - } - - Map headers = extractStringStringMap(remote, "headers"); - TimeValue socketTimeout = extractTimeValue(remote, "socket_timeout", RemoteInfo.DEFAULT_SOCKET_TIMEOUT); - TimeValue connectTimeout = extractTimeValue(remote, "connect_timeout", RemoteInfo.DEFAULT_CONNECT_TIMEOUT); - if (false == remote.isEmpty()) { - throw new IllegalArgumentException( - "Unsupported fields in [remote]: [" + Strings.collectionToCommaDelimitedString(remote.keySet()) + "]"); - } - return new RemoteInfo(scheme, host, port, pathPrefix, queryForRemote(source), - username, password, headers, socketTimeout, connectTimeout); - } - - /** - * Yank a string array from a map. Emulates XContent's permissive String to - * String array conversions. - */ - private static String[] extractStringArray(Map source, String name) { - Object value = source.remove(name); - if (value == null) { - return null; - } - if (value instanceof List) { - @SuppressWarnings("unchecked") - List list = (List) value; - return list.toArray(new String[list.size()]); - } else if (value instanceof String) { - return new String[] {(String) value}; - } else { - throw new IllegalArgumentException("Expected [" + name + "] to be a list of a string but was [" + value + ']'); - } - } - - private static String extractString(Map source, String name) { - Object value = source.remove(name); - if (value == null) { - return null; - } - if (value instanceof String) { - return (String) value; - } - throw new IllegalArgumentException("Expected [" + name + "] to be a string but was [" + value + "]"); - } - - private static Map extractStringStringMap(Map source, String name) { - Object value = source.remove(name); - if (value == null) { - return emptyMap(); - } - if (false == value instanceof Map) { - throw new IllegalArgumentException("Expected [" + name + "] to be an object containing strings but was [" + value + "]"); - } - Map map = (Map) value; - for (Map.Entry entry : map.entrySet()) { - if (false == entry.getKey() instanceof String || false == entry.getValue() instanceof String) { - throw new IllegalArgumentException("Expected [" + name + "] to be an object containing strings but has [" + entry + "]"); - } - } - @SuppressWarnings("unchecked") // We just checked.... - Map safe = (Map) map; - return safe; - } - - private static TimeValue extractTimeValue(Map source, String name, TimeValue defaultValue) { - String string = extractString(source, name); - return string == null ? 
defaultValue : parseTimeValue(string, name); - } - - private static BytesReference queryForRemote(Map source) throws IOException { - XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - Object query = source.remove("query"); - if (query == null) { - return BytesReference.bytes(matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS)); - } - if (!(query instanceof Map)) { - throw new IllegalArgumentException("Expected [query] to be an object but was [" + query + "]"); - } - @SuppressWarnings("unchecked") - Map map = (Map) query; - return BytesReference.bytes(builder.map(map)); - } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index f0aca38545b..e9c46bbb8e4 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.rest.RestRequest.Method; @@ -33,11 +32,8 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; public class RestReindexActionTests extends RestActionTestCase { @@ -48,126 +44,6 @@ public class RestReindexActionTests extends RestActionTestCase { action = new RestReindexAction(Settings.EMPTY, controller()); } - public void testBuildRemoteInfoNoRemote() throws IOException { - assertNull(RestReindexAction.buildRemoteInfo(new HashMap<>())); - } - - public void testBuildRemoteInfoFullyLoaded() throws IOException { - Map headers = new HashMap<>(); - headers.put("first", "a"); - headers.put("second", "b"); - headers.put("third", ""); - - Map remote = new HashMap<>(); - remote.put("host", "https://example.com:9200"); - remote.put("username", "testuser"); - remote.put("password", "testpass"); - remote.put("headers", headers); - remote.put("socket_timeout", "90s"); - remote.put("connect_timeout", "10s"); - - Map query = new HashMap<>(); - query.put("a", "b"); - - Map source = new HashMap<>(); - source.put("remote", remote); - source.put("query", query); - - RemoteInfo remoteInfo = RestReindexAction.buildRemoteInfo(source); - assertEquals("https", remoteInfo.getScheme()); - assertEquals("example.com", remoteInfo.getHost()); - assertEquals(9200, remoteInfo.getPort()); - assertEquals("{\n \"a\" : \"b\"\n}", remoteInfo.getQuery().utf8ToString()); - assertEquals("testuser", remoteInfo.getUsername()); - assertEquals("testpass", remoteInfo.getPassword()); - assertEquals(headers, remoteInfo.getHeaders()); - assertEquals(timeValueSeconds(90), remoteInfo.getSocketTimeout()); - assertEquals(timeValueSeconds(10), remoteInfo.getConnectTimeout()); - } - - public void testBuildRemoteInfoWithoutAllParts() throws IOException { - expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com")); - expectThrows(IllegalArgumentException.class, () -> 
buildRemoteInfoHostTestCase(":9200")); - expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://:9200")); - expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com:9200")); - expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com")); - } - - public void testBuildRemoteInfoWithAllHostParts() throws IOException { - RemoteInfo info = buildRemoteInfoHostTestCase("http://example.com:9200"); - assertEquals("http", info.getScheme()); - assertEquals("example.com", info.getHost()); - assertEquals(9200, info.getPort()); - assertNull(info.getPathPrefix()); - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); // Didn't set the timeout so we should get the default - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); // Didn't set the timeout so we should get the default - - info = buildRemoteInfoHostTestCase("https://other.example.com:9201"); - assertEquals("https", info.getScheme()); - assertEquals("other.example.com", info.getHost()); - assertEquals(9201, info.getPort()); - assertNull(info.getPathPrefix()); - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); - - info = buildRemoteInfoHostTestCase("https://[::1]:9201"); - assertEquals("https", info.getScheme()); - assertEquals("[::1]", info.getHost()); - assertEquals(9201, info.getPort()); - assertNull(info.getPathPrefix()); - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); - - info = buildRemoteInfoHostTestCase("https://other.example.com:9201/"); - assertEquals("https", info.getScheme()); - assertEquals("other.example.com", info.getHost()); - assertEquals(9201, info.getPort()); - assertEquals("/", info.getPathPrefix()); - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); - - info = buildRemoteInfoHostTestCase("https://other.example.com:9201/proxy-path/"); - assertEquals("https", info.getScheme()); - assertEquals("other.example.com", info.getHost()); - assertEquals(9201, info.getPort()); - assertEquals("/proxy-path/", info.getPathPrefix()); - assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); - assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); - - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> buildRemoteInfoHostTestCase("https")); - assertEquals("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? 
but was [https]", - exception.getMessage()); - } - - public void testReindexFromRemoteRequestParsing() throws IOException { - BytesReference request; - try (XContentBuilder b = JsonXContent.contentBuilder()) { - b.startObject(); { - b.startObject("source"); { - b.startObject("remote"); { - b.field("host", "http://localhost:9200"); - } - b.endObject(); - b.field("index", "source"); - } - b.endObject(); - b.startObject("dest"); { - b.field("index", "dest"); - } - b.endObject(); - } - b.endObject(); - request = BytesReference.bytes(b); - } - try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) { - ReindexRequest r = new ReindexRequest(); - RestReindexAction.PARSER.parse(p, r, null); - assertEquals("localhost", r.getRemoteInfo().getHost()); - assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices()); - } - } - public void testPipelineQueryParameterIsError() throws IOException { FakeRestRequest.Builder request = new FakeRestRequest.Builder(xContentRegistry()); try (XContentBuilder body = JsonXContent.contentBuilder().prettyPrint()) { @@ -206,16 +82,6 @@ public class RestReindexActionTests extends RestActionTestCase { } } - private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException { - Map remote = new HashMap<>(); - remote.put("host", hostInRest); - - Map source = new HashMap<>(); - source.put("remote", remote); - - return RestReindexAction.buildRemoteInfo(source); - } - /** * test deprecation is logged if one or more types are used in source search request inside reindex */ @@ -234,7 +100,7 @@ public class RestReindexActionTests extends RestActionTestCase { b.endObject(); requestBuilder.withContent(new BytesArray(BytesReference.bytes(b).toBytesRef()), XContentType.JSON); dispatchRequest(requestBuilder.build()); - assertWarnings(RestReindexAction.TYPES_DEPRECATION_MESSAGE); + assertWarnings(ReindexRequest.TYPES_DEPRECATION_MESSAGE); } /** @@ -255,6 +121,6 @@ public class RestReindexActionTests extends RestActionTestCase { b.endObject(); requestBuilder.withContent(new BytesArray(BytesReference.bytes(b).toBytesRef()), XContentType.JSON); dispatchRequest(requestBuilder.build()); - assertWarnings(RestReindexAction.TYPES_DEPRECATION_MESSAGE); + assertWarnings(ReindexRequest.TYPES_DEPRECATION_MESSAGE); } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 193ebd0b077..e7f027a7517 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.11.505' + 'aws': '1.11.562' ] dependencies { diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 deleted file mode 100644 index add5db290e8..00000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d19328c227b2b5ad81d137361ebc9cbcd0396465 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.562.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.562.jar.sha1 new file mode 100644 index 00000000000..ed8ded6a360 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.562.jar.sha1 @@ -0,0 +1 @@ +b5fc47ec1b5afe180f5ebb4eda755acdca7a20ae \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 deleted file mode 100644 index 
857f0888de3..00000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b669b3c90ea9bf73734ab26f0cb30c5c66addf55 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.562.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.562.jar.sha1 new file mode 100644 index 00000000000..040d28de70b --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.562.jar.sha1 @@ -0,0 +1 @@ +0211a055fb3e036033af4b1ca25ada0574a756ec \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index 3135769df5f..040472723fb 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -20,679 +20,17 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; -import com.amazonaws.ResponseMetadata; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.regions.Region; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; -import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentRequest; -import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentResult; -import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsRequest; -import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsResult; -import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest; -import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionResult; -import com.amazonaws.services.ec2.model.AdvertiseByoipCidrRequest; -import com.amazonaws.services.ec2.model.AdvertiseByoipCidrResult; -import com.amazonaws.services.ec2.model.AllocateAddressRequest; -import com.amazonaws.services.ec2.model.AllocateAddressResult; -import com.amazonaws.services.ec2.model.AllocateHostsRequest; -import com.amazonaws.services.ec2.model.AllocateHostsResult; -import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkRequest; -import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkResult; -import com.amazonaws.services.ec2.model.AssignIpv6AddressesRequest; -import com.amazonaws.services.ec2.model.AssignIpv6AddressesResult; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; -import com.amazonaws.services.ec2.model.AssociateAddressRequest; -import com.amazonaws.services.ec2.model.AssociateAddressResult; -import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkRequest; -import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkResult; -import com.amazonaws.services.ec2.model.AssociateDhcpOptionsRequest; -import com.amazonaws.services.ec2.model.AssociateDhcpOptionsResult; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; -import com.amazonaws.services.ec2.model.AssociateRouteTableRequest; -import 
com.amazonaws.services.ec2.model.AssociateRouteTableResult; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; -import com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableRequest; -import com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableResult; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; -import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; -import com.amazonaws.services.ec2.model.AttachClassicLinkVpcResult; -import com.amazonaws.services.ec2.model.AttachInternetGatewayRequest; -import com.amazonaws.services.ec2.model.AttachInternetGatewayResult; -import com.amazonaws.services.ec2.model.AttachNetworkInterfaceRequest; -import com.amazonaws.services.ec2.model.AttachNetworkInterfaceResult; -import com.amazonaws.services.ec2.model.AttachVolumeRequest; -import com.amazonaws.services.ec2.model.AttachVolumeResult; -import com.amazonaws.services.ec2.model.AttachVpnGatewayRequest; -import com.amazonaws.services.ec2.model.AttachVpnGatewayResult; -import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressRequest; -import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressResult; -import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressRequest; -import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressResult; -import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest; -import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressResult; -import com.amazonaws.services.ec2.model.BundleInstanceRequest; -import com.amazonaws.services.ec2.model.BundleInstanceResult; -import com.amazonaws.services.ec2.model.CancelBundleTaskRequest; -import com.amazonaws.services.ec2.model.CancelBundleTaskResult; -import com.amazonaws.services.ec2.model.CancelCapacityReservationRequest; -import com.amazonaws.services.ec2.model.CancelCapacityReservationResult; -import com.amazonaws.services.ec2.model.CancelConversionTaskRequest; -import com.amazonaws.services.ec2.model.CancelConversionTaskResult; -import com.amazonaws.services.ec2.model.CancelExportTaskRequest; -import com.amazonaws.services.ec2.model.CancelExportTaskResult; -import com.amazonaws.services.ec2.model.CancelImportTaskRequest; -import com.amazonaws.services.ec2.model.CancelImportTaskResult; -import com.amazonaws.services.ec2.model.CancelReservedInstancesListingRequest; -import com.amazonaws.services.ec2.model.CancelReservedInstancesListingResult; -import com.amazonaws.services.ec2.model.CancelSpotFleetRequestsRequest; -import com.amazonaws.services.ec2.model.CancelSpotFleetRequestsResult; -import com.amazonaws.services.ec2.model.CancelSpotInstanceRequestsRequest; -import com.amazonaws.services.ec2.model.CancelSpotInstanceRequestsResult; -import com.amazonaws.services.ec2.model.ConfirmProductInstanceRequest; -import com.amazonaws.services.ec2.model.ConfirmProductInstanceResult; -import com.amazonaws.services.ec2.model.CopyFpgaImageRequest; -import com.amazonaws.services.ec2.model.CopyFpgaImageResult; -import com.amazonaws.services.ec2.model.CopyImageRequest; -import com.amazonaws.services.ec2.model.CopyImageResult; -import com.amazonaws.services.ec2.model.CopySnapshotRequest; -import com.amazonaws.services.ec2.model.CopySnapshotResult; -import com.amazonaws.services.ec2.model.CreateCapacityReservationRequest; -import 
com.amazonaws.services.ec2.model.CreateCapacityReservationResult; -import com.amazonaws.services.ec2.model.CreateClientVpnEndpointRequest; -import com.amazonaws.services.ec2.model.CreateClientVpnEndpointResult; -import com.amazonaws.services.ec2.model.CreateClientVpnRouteRequest; -import com.amazonaws.services.ec2.model.CreateClientVpnRouteResult; -import com.amazonaws.services.ec2.model.CreateCustomerGatewayRequest; -import com.amazonaws.services.ec2.model.CreateCustomerGatewayResult; -import com.amazonaws.services.ec2.model.CreateDefaultSubnetRequest; -import com.amazonaws.services.ec2.model.CreateDefaultSubnetResult; -import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; -import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; -import com.amazonaws.services.ec2.model.CreateDhcpOptionsRequest; -import com.amazonaws.services.ec2.model.CreateDhcpOptionsResult; -import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayRequest; -import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.CreateFleetRequest; -import com.amazonaws.services.ec2.model.CreateFleetResult; -import com.amazonaws.services.ec2.model.CreateFlowLogsRequest; -import com.amazonaws.services.ec2.model.CreateFlowLogsResult; -import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; -import com.amazonaws.services.ec2.model.CreateFpgaImageResult; -import com.amazonaws.services.ec2.model.CreateImageRequest; -import com.amazonaws.services.ec2.model.CreateImageResult; -import com.amazonaws.services.ec2.model.CreateInstanceExportTaskRequest; -import com.amazonaws.services.ec2.model.CreateInstanceExportTaskResult; -import com.amazonaws.services.ec2.model.CreateInternetGatewayRequest; -import com.amazonaws.services.ec2.model.CreateInternetGatewayResult; -import com.amazonaws.services.ec2.model.CreateKeyPairRequest; -import com.amazonaws.services.ec2.model.CreateKeyPairResult; -import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; -import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; -import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionRequest; -import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionResult; -import com.amazonaws.services.ec2.model.CreateNatGatewayRequest; -import com.amazonaws.services.ec2.model.CreateNatGatewayResult; -import com.amazonaws.services.ec2.model.CreateNetworkAclEntryRequest; -import com.amazonaws.services.ec2.model.CreateNetworkAclEntryResult; -import com.amazonaws.services.ec2.model.CreateNetworkAclRequest; -import com.amazonaws.services.ec2.model.CreateNetworkAclResult; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; -import com.amazonaws.services.ec2.model.CreateNetworkInterfaceRequest; -import com.amazonaws.services.ec2.model.CreateNetworkInterfaceResult; -import com.amazonaws.services.ec2.model.CreatePlacementGroupRequest; -import com.amazonaws.services.ec2.model.CreatePlacementGroupResult; -import com.amazonaws.services.ec2.model.CreateReservedInstancesListingRequest; -import com.amazonaws.services.ec2.model.CreateReservedInstancesListingResult; -import com.amazonaws.services.ec2.model.CreateRouteRequest; -import com.amazonaws.services.ec2.model.CreateRouteResult; -import com.amazonaws.services.ec2.model.CreateRouteTableRequest; -import com.amazonaws.services.ec2.model.CreateRouteTableResult; -import 
com.amazonaws.services.ec2.model.CreateSecurityGroupRequest; -import com.amazonaws.services.ec2.model.CreateSecurityGroupResult; -import com.amazonaws.services.ec2.model.CreateSnapshotRequest; -import com.amazonaws.services.ec2.model.CreateSnapshotResult; -import com.amazonaws.services.ec2.model.CreateSpotDatafeedSubscriptionRequest; -import com.amazonaws.services.ec2.model.CreateSpotDatafeedSubscriptionResult; -import com.amazonaws.services.ec2.model.CreateSubnetRequest; -import com.amazonaws.services.ec2.model.CreateSubnetResult; -import com.amazonaws.services.ec2.model.CreateTagsRequest; -import com.amazonaws.services.ec2.model.CreateTagsResult; -import com.amazonaws.services.ec2.model.CreateTransitGatewayRequest; -import com.amazonaws.services.ec2.model.CreateTransitGatewayResult; -import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteRequest; -import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteResult; -import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableRequest; -import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableResult; -import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentRequest; -import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentResult; -import com.amazonaws.services.ec2.model.CreateVolumeRequest; -import com.amazonaws.services.ec2.model.CreateVolumeResult; -import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationRequest; -import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationResult; -import com.amazonaws.services.ec2.model.CreateVpcEndpointRequest; -import com.amazonaws.services.ec2.model.CreateVpcEndpointResult; -import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationRequest; -import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationResult; -import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionRequest; -import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionResult; -import com.amazonaws.services.ec2.model.CreateVpcRequest; -import com.amazonaws.services.ec2.model.CreateVpcResult; -import com.amazonaws.services.ec2.model.CreateVpnConnectionRequest; -import com.amazonaws.services.ec2.model.CreateVpnConnectionResult; -import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteRequest; -import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteResult; -import com.amazonaws.services.ec2.model.CreateVpnGatewayRequest; -import com.amazonaws.services.ec2.model.CreateVpnGatewayResult; -import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointRequest; -import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointResult; -import com.amazonaws.services.ec2.model.DeleteClientVpnRouteRequest; -import com.amazonaws.services.ec2.model.DeleteClientVpnRouteResult; -import com.amazonaws.services.ec2.model.DeleteCustomerGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteCustomerGatewayResult; -import com.amazonaws.services.ec2.model.DeleteDhcpOptionsRequest; -import com.amazonaws.services.ec2.model.DeleteDhcpOptionsResult; -import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.DeleteFleetsRequest; -import com.amazonaws.services.ec2.model.DeleteFleetsResult; -import com.amazonaws.services.ec2.model.DeleteFlowLogsRequest; -import com.amazonaws.services.ec2.model.DeleteFlowLogsResult; -import 
com.amazonaws.services.ec2.model.DeleteFpgaImageRequest; -import com.amazonaws.services.ec2.model.DeleteFpgaImageResult; -import com.amazonaws.services.ec2.model.DeleteInternetGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteInternetGatewayResult; -import com.amazonaws.services.ec2.model.DeleteKeyPairRequest; -import com.amazonaws.services.ec2.model.DeleteKeyPairResult; -import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest; -import com.amazonaws.services.ec2.model.DeleteLaunchTemplateResult; -import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsRequest; -import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResult; -import com.amazonaws.services.ec2.model.DeleteNatGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteNatGatewayResult; -import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryResult; -import com.amazonaws.services.ec2.model.DeleteNetworkAclRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkAclResult; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceResult; -import com.amazonaws.services.ec2.model.DeletePlacementGroupRequest; -import com.amazonaws.services.ec2.model.DeletePlacementGroupResult; -import com.amazonaws.services.ec2.model.DeleteRouteRequest; -import com.amazonaws.services.ec2.model.DeleteRouteResult; -import com.amazonaws.services.ec2.model.DeleteRouteTableRequest; -import com.amazonaws.services.ec2.model.DeleteRouteTableResult; -import com.amazonaws.services.ec2.model.DeleteSecurityGroupRequest; -import com.amazonaws.services.ec2.model.DeleteSecurityGroupResult; -import com.amazonaws.services.ec2.model.DeleteSnapshotRequest; -import com.amazonaws.services.ec2.model.DeleteSnapshotResult; -import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionRequest; -import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionResult; -import com.amazonaws.services.ec2.model.DeleteSubnetRequest; -import com.amazonaws.services.ec2.model.DeleteSubnetResult; -import com.amazonaws.services.ec2.model.DeleteTagsRequest; -import com.amazonaws.services.ec2.model.DeleteTagsResult; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayResult; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteRequest; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteResult; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableRequest; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableResult; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentRequest; -import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentResult; -import com.amazonaws.services.ec2.model.DeleteVolumeRequest; -import com.amazonaws.services.ec2.model.DeleteVolumeResult; -import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsRequest; -import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsResult; -import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsRequest; -import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsResult; -import 
com.amazonaws.services.ec2.model.DeleteVpcEndpointsRequest; -import com.amazonaws.services.ec2.model.DeleteVpcEndpointsResult; -import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionRequest; -import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionResult; -import com.amazonaws.services.ec2.model.DeleteVpcRequest; -import com.amazonaws.services.ec2.model.DeleteVpcResult; -import com.amazonaws.services.ec2.model.DeleteVpnConnectionRequest; -import com.amazonaws.services.ec2.model.DeleteVpnConnectionResult; -import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteRequest; -import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteResult; -import com.amazonaws.services.ec2.model.DeleteVpnGatewayRequest; -import com.amazonaws.services.ec2.model.DeleteVpnGatewayResult; -import com.amazonaws.services.ec2.model.DeprovisionByoipCidrRequest; -import com.amazonaws.services.ec2.model.DeprovisionByoipCidrResult; -import com.amazonaws.services.ec2.model.DeregisterImageRequest; -import com.amazonaws.services.ec2.model.DeregisterImageResult; -import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest; -import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult; -import com.amazonaws.services.ec2.model.DescribeAddressesRequest; -import com.amazonaws.services.ec2.model.DescribeAddressesResult; -import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatResult; -import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesRequest; -import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesResult; -import com.amazonaws.services.ec2.model.DescribeBundleTasksRequest; -import com.amazonaws.services.ec2.model.DescribeBundleTasksResult; -import com.amazonaws.services.ec2.model.DescribeByoipCidrsRequest; -import com.amazonaws.services.ec2.model.DescribeByoipCidrsResult; -import com.amazonaws.services.ec2.model.DescribeCapacityReservationsRequest; -import com.amazonaws.services.ec2.model.DescribeCapacityReservationsResult; -import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesResult; -import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesRequest; -import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesResult; -import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsRequest; -import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsResult; -import com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsRequest; -import com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsResult; -import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesRequest; -import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesResult; -import com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksRequest; -import com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksResult; -import com.amazonaws.services.ec2.model.DescribeConversionTasksRequest; -import com.amazonaws.services.ec2.model.DescribeConversionTasksResult; -import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysRequest; -import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeDhcpOptionsRequest; -import com.amazonaws.services.ec2.model.DescribeDhcpOptionsResult; -import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysRequest; -import 
com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeElasticGpusRequest; -import com.amazonaws.services.ec2.model.DescribeElasticGpusResult; -import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; -import com.amazonaws.services.ec2.model.DescribeExportTasksResult; -import com.amazonaws.services.ec2.model.DescribeFleetHistoryRequest; -import com.amazonaws.services.ec2.model.DescribeFleetHistoryResult; -import com.amazonaws.services.ec2.model.DescribeFleetInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeFleetInstancesResult; -import com.amazonaws.services.ec2.model.DescribeFleetsRequest; -import com.amazonaws.services.ec2.model.DescribeFleetsResult; -import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; -import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; -import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeResult; -import com.amazonaws.services.ec2.model.DescribeFpgaImagesRequest; -import com.amazonaws.services.ec2.model.DescribeFpgaImagesResult; -import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsRequest; -import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsResult; -import com.amazonaws.services.ec2.model.DescribeHostReservationsRequest; -import com.amazonaws.services.ec2.model.DescribeHostReservationsResult; -import com.amazonaws.services.ec2.model.DescribeHostsRequest; -import com.amazonaws.services.ec2.model.DescribeHostsResult; -import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsRequest; -import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsResult; -import com.amazonaws.services.ec2.model.DescribeIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribeIdFormatResult; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; -import com.amazonaws.services.ec2.model.DescribeImageAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeImageAttributeResult; -import com.amazonaws.services.ec2.model.DescribeImagesRequest; -import com.amazonaws.services.ec2.model.DescribeImagesResult; -import com.amazonaws.services.ec2.model.DescribeImportImageTasksRequest; -import com.amazonaws.services.ec2.model.DescribeImportImageTasksResult; -import com.amazonaws.services.ec2.model.DescribeImportSnapshotTasksRequest; -import com.amazonaws.services.ec2.model.DescribeImportSnapshotTasksResult; -import com.amazonaws.services.ec2.model.DescribeInstanceAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeInstanceAttributeResult; -import com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsRequest; -import com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsResult; -import com.amazonaws.services.ec2.model.DescribeInstanceStatusRequest; -import com.amazonaws.services.ec2.model.DescribeInstanceStatusResult; +import com.amazonaws.services.ec2.AbstractAmazonEC2; import com.amazonaws.services.ec2.model.DescribeInstancesRequest; import com.amazonaws.services.ec2.model.DescribeInstancesResult; -import com.amazonaws.services.ec2.model.DescribeInternetGatewaysRequest; -import com.amazonaws.services.ec2.model.DescribeInternetGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeKeyPairsRequest; -import 
com.amazonaws.services.ec2.model.DescribeKeyPairsResult; -import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest; -import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult; -import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; -import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; -import com.amazonaws.services.ec2.model.DescribeMovingAddressesRequest; -import com.amazonaws.services.ec2.model.DescribeMovingAddressesResult; -import com.amazonaws.services.ec2.model.DescribeNatGatewaysRequest; -import com.amazonaws.services.ec2.model.DescribeNatGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeNetworkAclsRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkAclsResult; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeResult; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsResult; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; -import com.amazonaws.services.ec2.model.DescribePlacementGroupsRequest; -import com.amazonaws.services.ec2.model.DescribePlacementGroupsResult; -import com.amazonaws.services.ec2.model.DescribePrefixListsRequest; -import com.amazonaws.services.ec2.model.DescribePrefixListsResult; -import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatResult; -import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsRequest; -import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsResult; -import com.amazonaws.services.ec2.model.DescribeRegionsRequest; -import com.amazonaws.services.ec2.model.DescribeRegionsResult; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesListingsRequest; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesListingsResult; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesModificationsRequest; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesModificationsResult; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesOfferingsRequest; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesOfferingsResult; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeReservedInstancesResult; -import com.amazonaws.services.ec2.model.DescribeRouteTablesRequest; -import com.amazonaws.services.ec2.model.DescribeRouteTablesResult; -import com.amazonaws.services.ec2.model.DescribeScheduledInstanceAvailabilityRequest; -import com.amazonaws.services.ec2.model.DescribeScheduledInstanceAvailabilityResult; -import com.amazonaws.services.ec2.model.DescribeScheduledInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeScheduledInstancesResult; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesResult; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; -import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeResult; -import 
com.amazonaws.services.ec2.model.DescribeSnapshotsRequest; -import com.amazonaws.services.ec2.model.DescribeSnapshotsResult; -import com.amazonaws.services.ec2.model.DescribeSpotDatafeedSubscriptionRequest; -import com.amazonaws.services.ec2.model.DescribeSpotDatafeedSubscriptionResult; -import com.amazonaws.services.ec2.model.DescribeSpotFleetInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeSpotFleetInstancesResult; -import com.amazonaws.services.ec2.model.DescribeSpotFleetRequestHistoryRequest; -import com.amazonaws.services.ec2.model.DescribeSpotFleetRequestHistoryResult; -import com.amazonaws.services.ec2.model.DescribeSpotFleetRequestsRequest; -import com.amazonaws.services.ec2.model.DescribeSpotFleetRequestsResult; -import com.amazonaws.services.ec2.model.DescribeSpotInstanceRequestsRequest; -import com.amazonaws.services.ec2.model.DescribeSpotInstanceRequestsResult; -import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryRequest; -import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryResult; -import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; -import com.amazonaws.services.ec2.model.DescribeSubnetsRequest; -import com.amazonaws.services.ec2.model.DescribeSubnetsResult; -import com.amazonaws.services.ec2.model.DescribeTagsRequest; -import com.amazonaws.services.ec2.model.DescribeTagsResult; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsRequest; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsResult; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesRequest; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesResult; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsRequest; -import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsResult; -import com.amazonaws.services.ec2.model.DescribeTransitGatewaysRequest; -import com.amazonaws.services.ec2.model.DescribeTransitGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeVolumeAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeVolumeAttributeResult; -import com.amazonaws.services.ec2.model.DescribeVolumeStatusRequest; -import com.amazonaws.services.ec2.model.DescribeVolumeStatusResult; -import com.amazonaws.services.ec2.model.DescribeVolumesModificationsRequest; -import com.amazonaws.services.ec2.model.DescribeVolumesModificationsResult; -import com.amazonaws.services.ec2.model.DescribeVolumesRequest; -import com.amazonaws.services.ec2.model.DescribeVolumesResult; -import com.amazonaws.services.ec2.model.DescribeVpcAttributeRequest; -import com.amazonaws.services.ec2.model.DescribeVpcAttributeResult; -import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkDnsSupportRequest; -import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkDnsSupportResult; -import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkRequest; -import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; -import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsRequest; -import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsResult; -import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsRequest; -import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsResult; -import com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsRequest; -import 
com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsResult;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsResult;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesResult;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcEndpointsResult;
-import com.amazonaws.services.ec2.model.DescribeVpcPeeringConnectionsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcPeeringConnectionsResult;
-import com.amazonaws.services.ec2.model.DescribeVpcsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcsResult;
-import com.amazonaws.services.ec2.model.DescribeVpnConnectionsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpnConnectionsResult;
-import com.amazonaws.services.ec2.model.DescribeVpnGatewaysRequest;
-import com.amazonaws.services.ec2.model.DescribeVpnGatewaysResult;
-import com.amazonaws.services.ec2.model.DetachClassicLinkVpcRequest;
-import com.amazonaws.services.ec2.model.DetachClassicLinkVpcResult;
-import com.amazonaws.services.ec2.model.DetachInternetGatewayRequest;
-import com.amazonaws.services.ec2.model.DetachInternetGatewayResult;
-import com.amazonaws.services.ec2.model.DetachNetworkInterfaceRequest;
-import com.amazonaws.services.ec2.model.DetachNetworkInterfaceResult;
-import com.amazonaws.services.ec2.model.DetachVolumeRequest;
-import com.amazonaws.services.ec2.model.DetachVolumeResult;
-import com.amazonaws.services.ec2.model.DetachVpnGatewayRequest;
-import com.amazonaws.services.ec2.model.DetachVpnGatewayResult;
-import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationRequest;
-import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationResult;
-import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationRequest;
-import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationResult;
-import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportRequest;
-import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportResult;
-import com.amazonaws.services.ec2.model.DisableVpcClassicLinkRequest;
-import com.amazonaws.services.ec2.model.DisableVpcClassicLinkResult;
-import com.amazonaws.services.ec2.model.DisassociateAddressRequest;
-import com.amazonaws.services.ec2.model.DisassociateAddressResult;
-import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkRequest;
-import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkResult;
-import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileRequest;
-import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileResult;
-import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest;
-import com.amazonaws.services.ec2.model.DisassociateRouteTableResult;
-import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockRequest;
-import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockResult;
-import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableRequest;
-import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableResult;
-import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest;
-import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult;
-import com.amazonaws.services.ec2.model.DryRunResult;
-import com.amazonaws.services.ec2.model.DryRunSupportedRequest;
-import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationRequest;
-import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationResult;
-import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationRequest;
-import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationResult;
-import com.amazonaws.services.ec2.model.EnableVolumeIORequest;
-import com.amazonaws.services.ec2.model.EnableVolumeIOResult;
-import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportRequest;
-import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportResult;
-import com.amazonaws.services.ec2.model.EnableVpcClassicLinkRequest;
-import com.amazonaws.services.ec2.model.EnableVpcClassicLinkResult;
-import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListRequest;
-import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListResult;
-import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationRequest;
-import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationResult;
-import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesRequest;
-import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesResult;
 import com.amazonaws.services.ec2.model.Filter;
-import com.amazonaws.services.ec2.model.GetConsoleOutputRequest;
-import com.amazonaws.services.ec2.model.GetConsoleOutputResult;
-import com.amazonaws.services.ec2.model.GetConsoleScreenshotRequest;
-import com.amazonaws.services.ec2.model.GetConsoleScreenshotResult;
-import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewRequest;
-import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewResult;
-import com.amazonaws.services.ec2.model.GetLaunchTemplateDataRequest;
-import com.amazonaws.services.ec2.model.GetLaunchTemplateDataResult;
-import com.amazonaws.services.ec2.model.GetPasswordDataRequest;
-import com.amazonaws.services.ec2.model.GetPasswordDataResult;
-import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteRequest;
-import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteResult;
-import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsRequest;
-import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsResult;
-import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsRequest;
-import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsResult;
-import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsRequest;
-import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsResult;
-import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListRequest;
-import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListResult;
-import com.amazonaws.services.ec2.model.ImportImageRequest;
-import com.amazonaws.services.ec2.model.ImportImageResult;
-import com.amazonaws.services.ec2.model.ImportInstanceRequest;
-import com.amazonaws.services.ec2.model.ImportInstanceResult;
-import com.amazonaws.services.ec2.model.ImportKeyPairRequest;
-import com.amazonaws.services.ec2.model.ImportKeyPairResult;
-import com.amazonaws.services.ec2.model.ImportSnapshotRequest;
-import com.amazonaws.services.ec2.model.ImportSnapshotResult;
-import com.amazonaws.services.ec2.model.ImportVolumeRequest;
-import com.amazonaws.services.ec2.model.ImportVolumeResult;
 import com.amazonaws.services.ec2.model.Instance;
 import com.amazonaws.services.ec2.model.InstanceState;
 import com.amazonaws.services.ec2.model.InstanceStateName;
-import com.amazonaws.services.ec2.model.ModifyCapacityReservationRequest;
-import com.amazonaws.services.ec2.model.ModifyCapacityReservationResult;
-import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointRequest;
-import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointResult;
-import com.amazonaws.services.ec2.model.ModifyFleetRequest;
-import com.amazonaws.services.ec2.model.ModifyFleetResult;
-import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyHostsRequest;
-import com.amazonaws.services.ec2.model.ModifyHostsResult;
-import com.amazonaws.services.ec2.model.ModifyIdFormatRequest;
-import com.amazonaws.services.ec2.model.ModifyIdFormatResult;
-import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest;
-import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult;
-import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyImageAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyInstanceAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyInstanceAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesRequest;
-import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesResult;
-import com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationRequest;
-import com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationResult;
-import com.amazonaws.services.ec2.model.ModifyInstancePlacementRequest;
-import com.amazonaws.services.ec2.model.ModifyInstancePlacementResult;
-import com.amazonaws.services.ec2.model.ModifyLaunchTemplateRequest;
-import com.amazonaws.services.ec2.model.ModifyLaunchTemplateResult;
-import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyReservedInstancesRequest;
-import com.amazonaws.services.ec2.model.ModifyReservedInstancesResult;
-import com.amazonaws.services.ec2.model.ModifySnapshotAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifySnapshotAttributeResult;
-import com.amazonaws.services.ec2.model.ModifySpotFleetRequestRequest;
-import com.amazonaws.services.ec2.model.ModifySpotFleetRequestResult;
-import com.amazonaws.services.ec2.model.ModifySubnetAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifySubnetAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentRequest;
-import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentResult;
-import com.amazonaws.services.ec2.model.ModifyVolumeAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyVolumeAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyVolumeRequest;
-import com.amazonaws.services.ec2.model.ModifyVolumeResult;
-import com.amazonaws.services.ec2.model.ModifyVpcAttributeRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcAttributeResult;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationResult;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointResult;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationResult;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsResult;
-import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult;
-import com.amazonaws.services.ec2.model.ModifyVpcTenancyRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcTenancyResult;
-import com.amazonaws.services.ec2.model.MonitorInstancesRequest;
-import com.amazonaws.services.ec2.model.MonitorInstancesResult;
-import com.amazonaws.services.ec2.model.MoveAddressToVpcRequest;
-import com.amazonaws.services.ec2.model.MoveAddressToVpcResult;
-import com.amazonaws.services.ec2.model.ProvisionByoipCidrRequest;
-import com.amazonaws.services.ec2.model.ProvisionByoipCidrResult;
-import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest;
-import com.amazonaws.services.ec2.model.PurchaseHostReservationResult;
-import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingRequest;
-import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingResult;
-import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesRequest;
-import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesResult;
-import com.amazonaws.services.ec2.model.RebootInstancesRequest;
-import com.amazonaws.services.ec2.model.RebootInstancesResult;
-import com.amazonaws.services.ec2.model.RegisterImageRequest;
-import com.amazonaws.services.ec2.model.RegisterImageResult;
-import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentRequest;
-import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentResult;
-import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsRequest;
-import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsResult;
-import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionRequest;
-import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionResult;
-import com.amazonaws.services.ec2.model.ReleaseAddressRequest;
-import com.amazonaws.services.ec2.model.ReleaseAddressResult;
-import com.amazonaws.services.ec2.model.ReleaseHostsRequest;
-import com.amazonaws.services.ec2.model.ReleaseHostsResult;
-import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationRequest;
-import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationResult;
-import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationRequest;
-import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationResult;
-import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryRequest;
-import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryResult;
-import com.amazonaws.services.ec2.model.ReplaceRouteRequest;
-import com.amazonaws.services.ec2.model.ReplaceRouteResult;
-import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationRequest;
-import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationResult;
-import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteRequest;
-import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteResult;
-import com.amazonaws.services.ec2.model.ReportInstanceStatusRequest;
-import com.amazonaws.services.ec2.model.ReportInstanceStatusResult;
-import com.amazonaws.services.ec2.model.RequestSpotFleetRequest;
-import com.amazonaws.services.ec2.model.RequestSpotFleetResult;
-import com.amazonaws.services.ec2.model.RequestSpotInstancesRequest;
-import com.amazonaws.services.ec2.model.RequestSpotInstancesResult;
 import com.amazonaws.services.ec2.model.Reservation;
-import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeRequest;
-import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeResult;
-import com.amazonaws.services.ec2.model.ResetImageAttributeRequest;
-import com.amazonaws.services.ec2.model.ResetImageAttributeResult;
-import com.amazonaws.services.ec2.model.ResetInstanceAttributeRequest;
-import com.amazonaws.services.ec2.model.ResetInstanceAttributeResult;
-import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeRequest;
-import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeResult;
-import com.amazonaws.services.ec2.model.ResetSnapshotAttributeRequest;
-import com.amazonaws.services.ec2.model.ResetSnapshotAttributeResult;
-import com.amazonaws.services.ec2.model.RestoreAddressToClassicRequest;
-import com.amazonaws.services.ec2.model.RestoreAddressToClassicResult;
-import com.amazonaws.services.ec2.model.RevokeClientVpnIngressRequest;
-import com.amazonaws.services.ec2.model.RevokeClientVpnIngressResult;
-import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressRequest;
-import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressResult;
-import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest;
-import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressResult;
-import com.amazonaws.services.ec2.model.RunInstancesRequest;
-import com.amazonaws.services.ec2.model.RunInstancesResult;
-import com.amazonaws.services.ec2.model.RunScheduledInstancesRequest;
-import com.amazonaws.services.ec2.model.RunScheduledInstancesResult;
-import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesRequest;
-import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesResult;
-import com.amazonaws.services.ec2.model.StartInstancesRequest;
-import com.amazonaws.services.ec2.model.StartInstancesResult;
-import com.amazonaws.services.ec2.model.StopInstancesRequest;
-import com.amazonaws.services.ec2.model.StopInstancesResult;
 import com.amazonaws.services.ec2.model.Tag;
-import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsRequest;
-import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsResult;
-import com.amazonaws.services.ec2.model.TerminateInstancesRequest;
-import com.amazonaws.services.ec2.model.TerminateInstancesResult;
-import com.amazonaws.services.ec2.model.UnassignIpv6AddressesRequest;
-import com.amazonaws.services.ec2.model.UnassignIpv6AddressesResult;
-import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesRequest;
-import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesResult;
-import com.amazonaws.services.ec2.model.UnmonitorInstancesRequest;
-import com.amazonaws.services.ec2.model.UnmonitorInstancesResult;
-import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressRequest;
-import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressResult;
-import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressRequest;
-import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressResult;
-import com.amazonaws.services.ec2.model.WithdrawByoipCidrRequest;
-import com.amazonaws.services.ec2.model.WithdrawByoipCidrResult;
-import com.amazonaws.services.ec2.waiters.AmazonEC2Waiters;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -704,7 +42,7 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-public class AmazonEC2Mock implements AmazonEC2 {
+public class AmazonEC2Mock extends AbstractAmazonEC2 {
 
     private static final Logger logger = LogManager.getLogger(AmazonEC2Mock.class);
 
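[Editor's note] The one-line change above — AmazonEC2Mock now extends the SDK's AbstractAmazonEC2 adapter instead of implementing AmazonEC2 directly — is what enables the roughly 2,200-line deletion in the hunk below: the adapter already implements every AmazonEC2 method by throwing UnsupportedOperationException, so the mock no longer needs a hand-written "Not supported in mock" stub per method, and SDK upgrades that add new interface methods no longer force edits here. A minimal sketch of the pattern, assuming only the AWS SDK v1 classes named in this patch; MinimalEc2Mock and its canned response are illustrative and not part of the change:

    import com.amazonaws.services.ec2.AbstractAmazonEC2;
    import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
    import com.amazonaws.services.ec2.model.DescribeInstancesResult;

    // Hypothetical test double: override only the call the test exercises;
    // every other AmazonEC2 method inherits the adapter's
    // UnsupportedOperationException default.
    class MinimalEc2Mock extends AbstractAmazonEC2 {
        @Override
        public DescribeInstancesResult describeInstances(DescribeInstancesRequest request) {
            return new DescribeInstancesResult(); // canned empty reservation list
        }
    }
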
AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteLaunchTemplateResult deleteLaunchTemplate(DeleteLaunchTemplateRequest deleteLaunchTemplateRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteLaunchTemplateVersionsResult deleteLaunchTemplateVersions( - DeleteLaunchTemplateVersionsRequest deleteLaunchTemplateVersionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteNatGatewayResult deleteNatGateway(DeleteNatGatewayRequest deleteNatGatewayRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public UnmonitorInstancesResult unmonitorInstances(UnmonitorInstancesRequest unmonitorInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public UpdateSecurityGroupRuleDescriptionsIngressResult updateSecurityGroupRuleDescriptionsIngress( - UpdateSecurityGroupRuleDescriptionsIngressRequest updateSecurityGroupRuleDescriptionsIngressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public WithdrawByoipCidrResult withdrawByoipCidr(WithdrawByoipCidrRequest withdrawByoipCidrRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public UpdateSecurityGroupRuleDescriptionsEgressResult updateSecurityGroupRuleDescriptionsEgress( - UpdateSecurityGroupRuleDescriptionsEgressRequest updateSecurityGroupRuleDescriptionsEgressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AttachVpnGatewayResult attachVpnGateway(AttachVpnGatewayRequest attachVpnGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AuthorizeClientVpnIngressResult authorizeClientVpnIngress(AuthorizeClientVpnIngressRequest authorizeClientVpnIngressRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateImageResult createImage(CreateImageRequest createImageRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteSecurityGroupResult deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateInstanceExportTaskResult createInstanceExportTask(CreateInstanceExportTaskRequest createInstanceExportTaskRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AuthorizeSecurityGroupEgressResult authorizeSecurityGroupEgress( - AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateDhcpOptionsResult associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetPasswordDataResult getPasswordData(GetPasswordDataRequest getPasswordDataRequest) - throws AmazonClientException { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetReservedInstancesExchangeQuoteResult getReservedInstancesExchangeQuote( - GetReservedInstancesExchangeQuoteRequest getReservedInstancesExchangeQuoteRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetTransitGatewayAttachmentPropagationsResult getTransitGatewayAttachmentPropagations( - GetTransitGatewayAttachmentPropagationsRequest getTransitGatewayAttachmentPropagationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetTransitGatewayRouteTableAssociationsResult getTransitGatewayRouteTableAssociations( - GetTransitGatewayRouteTableAssociationsRequest getTransitGatewayRouteTableAssociationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetTransitGatewayRouteTablePropagationsResult getTransitGatewayRouteTablePropagations( - GetTransitGatewayRouteTablePropagationsRequest getTransitGatewayRouteTablePropagationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportClientVpnClientCertificateRevocationListResult importClientVpnClientCertificateRevocationList( - ImportClientVpnClientCertificateRevocationListRequest importClientVpnClientCertificateRevocationListRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public StopInstancesResult stopInstances(StopInstancesRequest stopInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public TerminateClientVpnConnectionsResult terminateClientVpnConnections( - TerminateClientVpnConnectionsRequest terminateClientVpnConnectionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportKeyPairResult importKeyPair(ImportKeyPairRequest importKeyPairRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteNetworkInterfaceResult deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcAttributeResult modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotFleetInstancesResult describeSpotFleetInstances(DescribeSpotFleetInstancesRequest describeSpotFleetInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateSecurityGroupResult createSecurityGroup(CreateSecurityGroupRequest createSecurityGroupRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotPriceHistoryResult describeSpotPriceHistory(DescribeSpotPriceHistoryRequest describeSpotPriceHistoryRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkInterfacesResult describeNetworkInterfaces(DescribeNetworkInterfacesRequest describeNetworkInterfacesRequest) - throws AmazonClientException { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkInterfacePermissionsResult describeNetworkInterfacePermissions( - DescribeNetworkInterfacePermissionsRequest describeNetworkInterfacePermissionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeRegionsResult describeRegions(DescribeRegionsRequest describeRegionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateDhcpOptionsResult createDhcpOptions(CreateDhcpOptionsRequest createDhcpOptionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateReservedInstancesListingResult createReservedInstancesListing( - CreateReservedInstancesListingRequest createReservedInstancesListingRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpcEndpointsResult deleteVpcEndpoints(DeleteVpcEndpointsRequest deleteVpcEndpointsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResetSnapshotAttributeResult resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInternetGatewaysResult describeInternetGateways(DescribeInternetGatewaysRequest describeInternetGatewaysRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportVolumeResult importVolume(ImportVolumeRequest importVolumeRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyCapacityReservationResult modifyCapacityReservation(ModifyCapacityReservationRequest modifyCapacityReservationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyClientVpnEndpointResult modifyClientVpnEndpoint(ModifyClientVpnEndpointRequest modifyClientVpnEndpointRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyFleetResult modifyFleet(ModifyFleetRequest modifyFleetRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyFpgaImageAttributeResult modifyFpgaImageAttribute(ModifyFpgaImageAttributeRequest modifyFpgaImageAttributeRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyHostsResult modifyHosts(ModifyHostsRequest modifyHostsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyIdFormatResult modifyIdFormat(ModifyIdFormatRequest modifyIdFormatRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroupsRequest describeSecurityGroupsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not 
supported in mock"); - } - - @Override - public DescribeStaleSecurityGroupsResult describeStaleSecurityGroups( - DescribeStaleSecurityGroupsRequest describeStaleSecurityGroupsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSecurityGroupReferencesResult describeSecurityGroupReferences( - DescribeSecurityGroupReferencesRequest describeSecurityGroupReferencesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RejectVpcPeeringConnectionResult rejectVpcPeeringConnection( - RejectVpcPeeringConnectionRequest rejectVpcPeeringConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcPeeringConnectionOptionsResult modifyVpcPeeringConnectionOptions( - ModifyVpcPeeringConnectionOptionsRequest modifyVpcPeeringConnectionOptionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcTenancyResult modifyVpcTenancy(ModifyVpcTenancyRequest modifyVpcTenancyRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteFlowLogsResult deleteFlowLogs(DeleteFlowLogsRequest deleteFlowLogsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteFpgaImageResult deleteFpgaImage(DeleteFpgaImageRequest deleteFpgaImageRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DetachVpnGatewayResult detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisableTransitGatewayRouteTablePropagationResult disableTransitGatewayRouteTablePropagation( - DisableTransitGatewayRouteTablePropagationRequest disableTransitGatewayRouteTablePropagationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeregisterImageResult deregisterImage(DeregisterImageRequest deregisterImageRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription( - DescribeSpotDatafeedSubscriptionRequest describeSpotDatafeedSubscriptionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteTransitGatewayResult deleteTransitGateway(DeleteTransitGatewayRequest deleteTransitGatewayRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteTransitGatewayRouteResult deleteTransitGatewayRoute(DeleteTransitGatewayRouteRequest deleteTransitGatewayRouteRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteTransitGatewayRouteTableResult deleteTransitGatewayRouteTable( - DeleteTransitGatewayRouteTableRequest deleteTransitGatewayRouteTableRequest) { - throw new UnsupportedOperationException("Not supported in 
mock"); - } - - @Override - public DeleteTransitGatewayVpcAttachmentResult deleteTransitGatewayVpcAttachment( - DeleteTransitGatewayVpcAttachmentRequest deleteTransitGatewayVpcAttachmentRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteSubnetResult deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAccountAttributesResult describeAccountAttributes(DescribeAccountAttributesRequest describeAccountAttributesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AttachClassicLinkVpcResult attachClassicLinkVpc(AttachClassicLinkVpcRequest attachClassicLinkVpcRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpnGatewayResult createVpnGateway(CreateVpnGatewayRequest createVpnGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteClientVpnEndpointResult deleteClientVpnEndpoint(DeleteClientVpnEndpointRequest deleteClientVpnEndpointRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteClientVpnRouteResult deleteClientVpnRoute(DeleteClientVpnRouteRequest deleteClientVpnRouteRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public EnableVolumeIOResult enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public MoveAddressToVpcResult moveAddressToVpc(MoveAddressToVpcRequest moveAddressToVpcRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ProvisionByoipCidrResult provisionByoipCidr(ProvisionByoipCidrRequest provisionByoipCidrRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpnGatewayResult deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeprovisionByoipCidrResult deprovisionByoipCidr(DeprovisionByoipCidrRequest deprovisionByoipCidrRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AttachVolumeResult attachVolume(AttachVolumeRequest attachVolumeRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumeStatusResult describeVolumeStatus(DescribeVolumeStatusRequest describeVolumeStatusRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumesModificationsResult describeVolumesModifications( - DescribeVolumesModificationsRequest describeVolumesModificationsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImportSnapshotTasksResult describeImportSnapshotTasks( - DescribeImportSnapshotTasksRequest describeImportSnapshotTasksRequest) - throws AmazonClientException { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpnConnectionsResult describeVpnConnections(DescribeVpnConnectionsRequest describeVpnConnectionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResetImageAttributeResult resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public EnableVgwRoutePropagationResult enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateSnapshotResult createSnapshot(CreateSnapshotRequest createSnapshotRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateNetworkInterfaceResult createNetworkInterface(CreateNetworkInterfaceRequest createNetworkInterfaceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyReservedInstancesResult modifyReservedInstances(ModifyReservedInstancesRequest modifyReservedInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelSpotFleetRequestsResult cancelSpotFleetRequests(CancelSpotFleetRequestsRequest cancelSpotFleetRequestsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public UnassignPrivateIpAddressesResult unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public UnassignIpv6AddressesResult unassignIpv6Addresses(UnassignIpv6AddressesRequest unassignIpv6AddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelConversionTaskResult cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateAddressResult associateAddress(AssociateAddressRequest associateAddressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateClientVpnTargetNetworkResult associateClientVpnTargetNetwork( - AssociateClientVpnTargetNetworkRequest associateClientVpnTargetNetworkRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateIamInstanceProfileResult associateIamInstanceProfile(AssociateIamInstanceProfileRequest associateIamInstanceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported 
in mock"); - } - - @Override - public AssociateVpcCidrBlockResult associateVpcCidrBlock(AssociateVpcCidrBlockRequest associateVpcCidrBlockRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateSubnetCidrBlockResult associateSubnetCidrBlock(AssociateSubnetCidrBlockRequest associateSubnetCidrBlockRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateTransitGatewayRouteTableResult associateTransitGatewayRouteTable( - AssociateTransitGatewayRouteTableRequest associateTransitGatewayRouteTableRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteCustomerGatewayResult deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateNetworkAclEntryResult createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection(AcceptVpcPeeringConnectionRequest acceptVpcPeeringConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeExportTasksResult describeExportTasks(DescribeExportTasksRequest describeExportTasksRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeElasticGpusResult describeElasticGpus(DescribeElasticGpusRequest describeElasticGpusRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFpgaImagesResult describeFpgaImages(DescribeFpgaImagesRequest describeFpgaImagesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeHostReservationOfferingsResult describeHostReservationOfferings( - DescribeHostReservationOfferingsRequest describeHostReservationOfferingsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeHostReservationsResult describeHostReservations(DescribeHostReservationsRequest describeHostReservationsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeIdentityIdFormatResult describeIdentityIdFormat(DescribeIdentityIdFormatRequest describeIdentityIdFormatRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DetachInternetGatewayResult detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcPeeringConnectionResult createVpcPeeringConnection(CreateVpcPeeringConnectionRequest createVpcPeeringConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateRouteTableResult createRouteTable(CreateRouteTableRequest 
createRouteTableRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelImportTaskResult cancelImportTask(CancelImportTaskRequest cancelImportTaskRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumesResult describeVolumes(DescribeVolumesRequest describeVolumesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesListingsResult describeReservedInstancesListings( - DescribeReservedInstancesListingsRequest describeReservedInstancesListingsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReportInstanceStatusResult reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeRouteTablesResult describeRouteTables(DescribeRouteTablesRequest describeRouteTablesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeDhcpOptionsResult describeDhcpOptions(DescribeDhcpOptionsRequest describeDhcpOptionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeEgressOnlyInternetGatewaysResult describeEgressOnlyInternetGateways( - DescribeEgressOnlyInternetGatewaysRequest describeEgressOnlyInternetGatewaysRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public MonitorInstancesResult monitorInstances(MonitorInstancesRequest monitorInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePrefixListsResult describePrefixLists(DescribePrefixListsRequest describePrefixListsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RequestSpotFleetResult requestSpotFleet(RequestSpotFleetRequest requestSpotFleetRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImportImageTasksResult describeImportImageTasks(DescribeImportImageTasksRequest describeImportImageTasksRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkAclsResult describeNetworkAcls(DescribeNetworkAclsRequest describeNetworkAclsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeBundleTasksResult describeBundleTasks(DescribeBundleTasksRequest describeBundleTasksRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportInstanceResult importInstance(ImportInstanceRequest importInstanceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest 
deleteVpcPeeringConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetConsoleScreenshotResult getConsoleScreenshot(GetConsoleScreenshotRequest getConsoleScreenshotRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetHostReservationPurchasePreviewResult getHostReservationPurchasePreview( - GetHostReservationPurchasePreviewRequest getHostReservationPurchasePreviewRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public GetLaunchTemplateDataResult getLaunchTemplateData(GetLaunchTemplateDataRequest getLaunchTemplateDataRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateInternetGatewayResult createInternetGateway(CreateInternetGatewayRequest createInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpnConnectionRouteResult deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DetachNetworkInterfaceResult detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyImageAttributeResult modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateCustomerGatewayResult createCustomerGateway(CreateCustomerGatewayRequest createCustomerGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateDefaultSubnetResult createDefaultSubnet(CreateDefaultSubnetRequest createDefaultSubnetRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateEgressOnlyInternetGatewayResult createEgressOnlyInternetGateway( - CreateEgressOnlyInternetGatewayRequest createEgressOnlyInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateFleetResult createFleet(CreateFleetRequest createFleetRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateFpgaImageResult createFpgaImage(CreateFpgaImageRequest createFpgaImageRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateNetworkInterfacePermissionResult createNetworkInterfacePermission( - CreateNetworkInterfacePermissionRequest createNetworkInterfacePermissionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateDefaultVpcResult createDefaultVpc(CreateDefaultVpcRequest createDefaultVpcRequest) - throws 
AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateSpotDatafeedSubscriptionResult createSpotDatafeedSubscription( - CreateSpotDatafeedSubscriptionRequest createSpotDatafeedSubscriptionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AttachInternetGatewayResult attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpnConnectionResult deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeMovingAddressesResult describeMovingAddresses(DescribeMovingAddressesRequest describeMovingAddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeConversionTasksResult describeConversionTasks(DescribeConversionTasksRequest describeConversionTasksRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpnConnectionResult createVpnConnection(CreateVpnConnectionRequest createVpnConnectionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportImageResult importImage(ImportImageRequest importImageRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisableVpcClassicLinkResult disableVpcClassicLink(DisableVpcClassicLinkRequest disableVpcClassicLinkRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisableVpcClassicLinkDnsSupportResult disableVpcClassicLinkDnsSupport( - DisableVpcClassicLinkDnsSupportRequest disableVpcClassicLinkDnsSupportRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInstanceAttributeResult describeInstanceAttribute(DescribeInstanceAttributeRequest describeInstanceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInstanceCreditSpecificationsResult describeInstanceCreditSpecifications( - DescribeInstanceCreditSpecificationsRequest describeInstanceCreditSpecificationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFlowLogsResult describeFlowLogs(DescribeFlowLogsRequest describeFlowLogsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections( - DescribeVpcPeeringConnectionsRequest describeVpcPeeringConnectionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePlacementGroupsResult describePlacementGroups(DescribePlacementGroupsRequest describePlacementGroupsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RunInstancesResult 
runInstances(RunInstancesRequest runInstancesRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RunScheduledInstancesResult runScheduledInstances(RunScheduledInstancesRequest runScheduledInstancesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public SearchTransitGatewayRoutesResult searchTransitGatewayRoutes( - SearchTransitGatewayRoutesRequest searchTransitGatewayRoutesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest describeSubnetsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssociateRouteTableResult associateRouteTable(AssociateRouteTableRequest associateRouteTableRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteNetworkAclResult deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImagesResult describeImages(DescribeImagesRequest describeImagesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public StartInstancesResult startInstances(StartInstancesRequest startInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyInstanceAttributeResult modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyInstanceCapacityReservationAttributesResult modifyInstanceCapacityReservationAttributes( - ModifyInstanceCapacityReservationAttributesRequest modifyInstanceCapacityReservationAttributesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyInstanceCreditSpecificationResult modifyInstanceCreditSpecification( - ModifyInstanceCreditSpecificationRequest modifyInstanceCreditSpecificationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyInstancePlacementResult modifyInstancePlacement(ModifyInstancePlacementRequest modifyInstancePlacementRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyLaunchTemplateResult modifyLaunchTemplate(ModifyLaunchTemplateRequest modifyLaunchTemplateRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdFormatRequest modifyIdentityIdFormatRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelReservedInstancesListingResult cancelReservedInstancesListing( - CancelReservedInstancesListingRequest cancelReservedInstancesListingRequest) - throws AmazonClientException { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteDhcpOptionsResult deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteEgressOnlyInternetGatewayResult deleteEgressOnlyInternetGateway( - DeleteEgressOnlyInternetGatewayRequest deleteEgressOnlyInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteFleetsResult deleteFleets(DeleteFleetsRequest deleteFleetsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteNetworkInterfacePermissionResult deleteNetworkInterfacePermission( - DeleteNetworkInterfacePermissionRequest deleteNetworkInterfacePermissionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AuthorizeSecurityGroupIngressResult authorizeSecurityGroupIngress( - AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests( - DescribeSpotInstanceRequestsRequest describeSpotInstanceRequestsRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcResult createVpc(CreateVpcRequest createVpcRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeCustomerGatewaysResult describeCustomerGateways(DescribeCustomerGatewaysRequest describeCustomerGatewaysRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelExportTaskResult cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateRouteResult createRoute(CreateRouteRequest createRouteRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcEndpointResult createVpcEndpoint(CreateVpcEndpointRequest createVpcEndpointRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcEndpointConnectionNotificationResult createVpcEndpointConnectionNotification( - CreateVpcEndpointConnectionNotificationRequest createVpcEndpointConnectionNotificationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcEndpointServiceConfigurationResult createVpcEndpointServiceConfiguration( - CreateVpcEndpointServiceConfigurationRequest createVpcEndpointServiceConfigurationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CopyImageResult copyImage(CopyImageRequest copyImageRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLinkRequest describeVpcClassicLinkRequest) - throws AmazonClientException { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyNetworkInterfaceAttributeResult modifyNetworkInterfaceAttribute( - ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteRouteTableResult deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkInterfaceAttributeResult describeNetworkInterfaceAttribute( - DescribeNetworkInterfaceAttributeRequest describeNetworkInterfaceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClassicLinkInstancesResult describeClassicLinkInstances( - DescribeClassicLinkInstancesRequest describeClassicLinkInstancesRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RequestSpotInstancesResult requestSpotInstances(RequestSpotInstancesRequest requestSpotInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResetFpgaImageAttributeResult resetFpgaImageAttribute(ResetFpgaImageAttributeRequest resetFpgaImageAttributeRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateTransitGatewayResult createTransitGateway(CreateTransitGatewayRequest createTransitGatewayRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateTransitGatewayRouteResult createTransitGatewayRoute(CreateTransitGatewayRouteRequest createTransitGatewayRouteRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateTransitGatewayRouteTableResult createTransitGatewayRouteTable( - CreateTransitGatewayRouteTableRequest createTransitGatewayRouteTableRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateTransitGatewayVpcAttachmentResult createTransitGatewayVpcAttachment( - CreateTransitGatewayVpcAttachmentRequest createTransitGatewayVpcAttachmentRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumeAttributeResult describeVolumeAttribute(DescribeVolumeAttributeRequest describeVolumeAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AttachNetworkInterfaceResult attachNetworkInterface(AttachNetworkInterfaceRequest attachNetworkInterfaceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); 
- } - - @Override - public CancelBundleTaskResult cancelBundleTask(CancelBundleTaskRequest cancelBundleTaskRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelCapacityReservationResult cancelCapacityReservation(CancelCapacityReservationRequest cancelCapacityReservationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisableVgwRoutePropagationResult disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportSnapshotResult importSnapshot(ImportSnapshotRequest importSnapshotRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelSpotInstanceRequestsResult cancelSpotInstanceRequests(CancelSpotInstanceRequestsRequest cancelSpotInstanceRequestsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotFleetRequestsResult describeSpotFleetRequests(DescribeSpotFleetRequestsRequest describeSpotFleetRequestsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public PurchaseReservedInstancesOfferingResult purchaseReservedInstancesOffering( - PurchaseReservedInstancesOfferingRequest purchaseReservedInstancesOfferingRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public PurchaseScheduledInstancesResult purchaseScheduledInstances( - PurchaseScheduledInstancesRequest purchaseScheduledInstancesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public PurchaseHostReservationResult purchaseHostReservation(PurchaseHostReservationRequest purchaseHostReservationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifySnapshotAttributeResult modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications( - DescribeReservedInstancesModificationsRequest describeReservedInstancesModificationsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public TerminateInstancesResult terminateInstances(TerminateInstancesRequest terminateInstancesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcEndpointResult modifyVpcEndpoint(ModifyVpcEndpointRequest modifyVpcEndpointRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcEndpointConnectionNotificationResult modifyVpcEndpointConnectionNotification( - ModifyVpcEndpointConnectionNotificationRequest modifyVpcEndpointConnectionNotificationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcEndpointServiceConfigurationResult modifyVpcEndpointServiceConfiguration( - 
ModifyVpcEndpointServiceConfigurationRequest modifyVpcEndpointServiceConfigurationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVpcEndpointServicePermissionsResult modifyVpcEndpointServicePermissions( - ModifyVpcEndpointServicePermissionsRequest modifyVpcEndpointServicePermissionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription( - DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteInternetGatewayResult deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSnapshotAttributeResult describeSnapshotAttribute(DescribeSnapshotAttributeRequest describeSnapshotAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceRouteTableAssociationResult replaceRouteTableAssociation( - ReplaceRouteTableAssociationRequest replaceRouteTableAssociationRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceTransitGatewayRouteResult replaceTransitGatewayRoute( - ReplaceTransitGatewayRouteRequest replaceTransitGatewayRouteRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAddressesResult describeAddresses(DescribeAddressesRequest describeAddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImageAttributeResult describeImageAttribute(DescribeImageAttributeRequest describeImageAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeKeyPairsResult describeKeyPairs(DescribeKeyPairsRequest describeKeyPairsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ConfirmProductInstanceResult confirmProductInstance(ConfirmProductInstanceRequest confirmProductInstanceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CopyFpgaImageResult copyFpgaImage(CopyFpgaImageRequest copyFpgaImageRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateRouteTableResult disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateIamInstanceProfileResult disassociateIamInstanceProfile( - DisassociateIamInstanceProfileRequest disassociateIamInstanceProfileRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateVpcCidrBlockResult disassociateVpcCidrBlock(DisassociateVpcCidrBlockRequest disassociateVpcCidrBlockRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - 
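// Every override removed in this file has the same shape: accept the request
// object, throw UnsupportedOperationException. A sketch of how such a mock can
// avoid this boilerplate entirely -- assuming the AWS SDK's AbstractAmazonEC2
// base class (which throws UnsupportedOperationException from every operation)
// and a hypothetical Ec2MockSketch name, since the diff only shows the
// deletions, not the replacement:
//
//     import com.amazonaws.services.ec2.AbstractAmazonEC2;
//     import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
//     import com.amazonaws.services.ec2.model.DescribeInstancesResult;
//
//     class Ec2MockSketch extends AbstractAmazonEC2 {
//         @Override // implement only what the discovery tests actually call
//         public DescribeInstancesResult describeInstances(DescribeInstancesRequest request) {
//             return new DescribeInstancesResult(); // canned empty answer
//         }
//     }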
@Override - public EnableTransitGatewayRouteTablePropagationResult enableTransitGatewayRouteTablePropagation( - EnableTransitGatewayRouteTablePropagationRequest enableTransitGatewayRouteTablePropagationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateSubnetCidrBlockResult disassociateSubnetCidrBlock( - DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateTransitGatewayRouteTableResult disassociateTransitGatewayRouteTable( - DisassociateTransitGatewayRouteTableRequest disassociateTransitGatewayRouteTableRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcAttributeResult describeVpcAttribute(DescribeVpcAttributeRequest describeVpcAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RevokeSecurityGroupEgressResult revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteNetworkAclEntryResult deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInstanceStatusResult describeInstanceStatus(DescribeInstanceStatusRequest describeInstanceStatusRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpnGatewaysResult describeVpnGateways(DescribeVpnGatewaysRequest describeVpnGatewaysRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateSubnetResult createSubnet(CreateSubnetRequest createSubnetRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings( - DescribeReservedInstancesOfferingsRequest describeReservedInstancesOfferingsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssignPrivateIpAddressesResult assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AssignIpv6AddressesResult assignIpv6Addresses(AssignIpv6AddressesRequest assignIpv6AddressesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory( - DescribeSpotFleetRequestHistoryRequest describeSpotFleetRequestHistoryRequest) - 
throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteSnapshotResult deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceNetworkAclAssociationResult replaceNetworkAclAssociation( - ReplaceNetworkAclAssociationRequest replaceNetworkAclAssociationRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateAddressResult disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DisassociateClientVpnTargetNetworkResult disassociateClientVpnTargetNetwork( - DisassociateClientVpnTargetNetworkRequest disassociateClientVpnTargetNetworkRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreatePlacementGroupResult createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public BundleInstanceResult bundleInstance(BundleInstanceRequest bundleInstanceRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeletePlacementGroupResult deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifySubnetAttributeResult modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ModifyTransitGatewayVpcAttachmentResult modifyTransitGatewayVpcAttachment( - ModifyTransitGatewayVpcAttachmentRequest modifyTransitGatewayVpcAttachmentRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpcEndpointConnectionNotificationsResult deleteVpcEndpointConnectionNotifications( - DeleteVpcEndpointConnectionNotificationsRequest deleteVpcEndpointConnectionNotificationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteVpcEndpointServiceConfigurationsResult deleteVpcEndpointServiceConfigurations( - DeleteVpcEndpointServiceConfigurationsRequest deleteVpcEndpointServiceConfigurationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CopySnapshotResult copySnapshot(CopySnapshotRequest copySnapshotRequest) throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateCapacityReservationResult createCapacityReservation(CreateCapacityReservationRequest createCapacityReservationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateClientVpnEndpointResult createClientVpnEndpoint(CreateClientVpnEndpointRequest createClientVpnEndpointRequest) { - 
throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateClientVpnRouteResult createClientVpnRoute(CreateClientVpnRouteRequest createClientVpnRouteRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointServicesResult describeVpcEndpointServices( - DescribeVpcEndpointServicesRequest describeVpcEndpointServicesRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AllocateAddressResult allocateAddress(AllocateAddressRequest allocateAddressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReleaseAddressResult releaseAddress(ReleaseAddressRequest releaseAddressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReleaseHostsResult releaseHosts(ReleaseHostsRequest releaseHostsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceIamInstanceProfileAssociationResult replaceIamInstanceProfileAssociation( - ReplaceIamInstanceProfileAssociationRequest replaceIamInstanceProfileAssociationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResetInstanceAttributeResult resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateKeyPairResult createKeyPair(CreateKeyPairRequest createKeyPairRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateLaunchTemplateResult createLaunchTemplate(CreateLaunchTemplateRequest createLaunchTemplateRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateLaunchTemplateVersionResult createLaunchTemplateVersion( - CreateLaunchTemplateVersionRequest createLaunchTemplateVersionRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateNatGatewayResult createNatGateway(CreateNatGatewayRequest createNatGatewayRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ReplaceNetworkAclEntryResult replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSnapshotsResult describeSnapshots(DescribeSnapshotsRequest describeSnapshotsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateNetworkAclResult createNetworkAcl(CreateNetworkAclRequest createNetworkAclRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RegisterImageResult registerImage(RegisterImageRequest registerImageRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RejectTransitGatewayVpcAttachmentResult rejectTransitGatewayVpcAttachment( - RejectTransitGatewayVpcAttachmentRequest rejectTransitGatewayVpcAttachmentRequest) { - throw new 
UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RejectVpcEndpointConnectionsResult rejectVpcEndpointConnections( - RejectVpcEndpointConnectionsRequest rejectVpcEndpointConnectionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResetNetworkInterfaceAttributeResult resetNetworkInterfaceAttribute( - ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public EnableVpcClassicLinkResult enableVpcClassicLink(EnableVpcClassicLinkRequest enableVpcClassicLinkRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public EnableVpcClassicLinkDnsSupportResult enableVpcClassicLinkDnsSupport( - EnableVpcClassicLinkDnsSupportRequest enableVpcClassicLinkDnsSupportRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ExportClientVpnClientCertificateRevocationListResult exportClientVpnClientCertificateRevocationList( - ExportClientVpnClientCertificateRevocationListRequest exportClientVpnClientCertificateRevocationListRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ExportClientVpnClientConfigurationResult exportClientVpnClientConfiguration( - ExportClientVpnClientConfigurationRequest exportClientVpnClientConfigurationRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ExportTransitGatewayRoutesResult exportTransitGatewayRoutes( - ExportTransitGatewayRoutesRequest exportTransitGatewayRoutesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpnConnectionRouteResult createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointsResult describeVpcEndpoints(DescribeVpcEndpointsRequest describeVpcEndpointsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DetachClassicLinkVpcResult detachClassicLinkVpc(DetachClassicLinkVpcRequest detachClassicLinkVpcRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesResult describeReservedInstances() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAvailabilityZonesResult describeAvailabilityZones() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotPriceHistoryResult describeSpotPriceHistory() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkInterfacesResult describeNetworkInterfaces() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeRegionsResult describeRegions() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInternetGatewaysResult describeInternetGateways() 
throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSecurityGroupsResult describeSecurityGroups() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAccountAttributesResult describeAccountAttributes() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumeStatusResult describeVolumeStatus() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImportSnapshotTasksResult describeImportSnapshotTasks() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpnConnectionsResult describeVpnConnections() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcsResult describeVpcs() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AdvertiseByoipCidrResult advertiseByoipCidr(AdvertiseByoipCidrRequest advertiseByoipCidrRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeExportTasksResult describeExportTasks() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFleetHistoryResult describeFleetHistory(DescribeFleetHistoryRequest describeFleetHistoryRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFleetInstancesResult describeFleetInstances(DescribeFleetInstancesRequest describeFleetInstancesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFleetsResult describeFleets(DescribeFleetsRequest describeFleetsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateVpcPeeringConnectionResult createVpcPeeringConnection() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CancelImportTaskResult cancelImportTask() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVolumesResult describeVolumes() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesListingsResult describeReservedInstancesListings() - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeRouteTablesResult describeRouteTables() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeScheduledInstanceAvailabilityResult describeScheduledInstanceAvailability( - 
DescribeScheduledInstanceAvailabilityRequest describeScheduledInstanceAvailabilityRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeScheduledInstancesResult describeScheduledInstances( - DescribeScheduledInstancesRequest describeScheduledInstancesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeDhcpOptionsResult describeDhcpOptions() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePrefixListsResult describePrefixLists() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePrincipalIdFormatResult describePrincipalIdFormat(DescribePrincipalIdFormatRequest describePrincipalIdFormatRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePublicIpv4PoolsResult describePublicIpv4Pools(DescribePublicIpv4PoolsRequest describePublicIpv4PoolsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImportImageTasksResult describeImportImageTasks() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNetworkAclsResult describeNetworkAcls() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeBundleTasksResult describeBundleTasks() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeByoipCidrsResult describeByoipCidrs(DescribeByoipCidrsRequest describeByoipCidrsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeCapacityReservationsResult describeCapacityReservations( - DescribeCapacityReservationsRequest describeCapacityReservationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public CreateInternetGatewayResult createInternetGateway() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeMovingAddressesResult describeMovingAddresses() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeNatGatewaysResult describeNatGateways(DescribeNatGatewaysRequest describeNatGatewaysRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeConversionTasksResult describeConversionTasks() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportImageResult importImage() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFlowLogsResult describeFlowLogs() 
throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeFpgaImageAttributeResult describeFpgaImageAttribute( - DescribeFpgaImageAttributeRequest describeFpgaImageAttributeRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeHostsResult describeHosts(DescribeHostsRequest describeHostsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeHostsResult describeHosts() { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeIamInstanceProfileAssociationsResult describeIamInstanceProfileAssociations( - DescribeIamInstanceProfileAssociationsRequest describeIamInstanceProfileAssociationsRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeIdFormatResult describeIdFormat(DescribeIdFormatRequest describeIdFormatRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeIdFormatResult describeIdFormat() { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribePlacementGroupsResult describePlacementGroups() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSubnetsResult describeSubnets() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInstancesResult describeInstances() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeImagesResult describeImages() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeCustomerGatewaysResult describeCustomerGateways() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcClassicLinkResult describeVpcClassicLink() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcClassicLinkDnsSupportResult describeVpcClassicLinkDnsSupport( - DescribeVpcClassicLinkDnsSupportRequest describeVpcClassicLinkDnsSupportRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointConnectionNotificationsResult describeVpcEndpointConnectionNotifications( - DescribeVpcEndpointConnectionNotificationsRequest describeVpcEndpointConnectionNotificationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointConnectionsResult describeVpcEndpointConnections( - DescribeVpcEndpointConnectionsRequest describeVpcEndpointConnectionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public 
DescribeVpcEndpointServiceConfigurationsResult describeVpcEndpointServiceConfigurations( - DescribeVpcEndpointServiceConfigurationsRequest describeVpcEndpointServiceConfigurationsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointServicePermissionsResult describeVpcEndpointServicePermissions( - DescribeVpcEndpointServicePermissionsRequest describeVpcEndpointServicePermissionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClassicLinkInstancesResult describeClassicLinkInstances() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClientVpnAuthorizationRulesResult describeClientVpnAuthorizationRules( - DescribeClientVpnAuthorizationRulesRequest describeClientVpnAuthorizationRulesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClientVpnConnectionsResult describeClientVpnConnections( - DescribeClientVpnConnectionsRequest describeClientVpnConnectionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClientVpnEndpointsResult describeClientVpnEndpoints( - DescribeClientVpnEndpointsRequest describeClientVpnEndpointsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClientVpnRoutesResult describeClientVpnRoutes( - DescribeClientVpnRoutesRequest describeClientVpnRoutesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeClientVpnTargetNetworksResult describeClientVpnTargetNetworks( - DescribeClientVpnTargetNetworksRequest describeClientVpnTargetNetworksRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTagsResult describeTags() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTransitGatewayAttachmentsResult describeTransitGatewayAttachments( - DescribeTransitGatewayAttachmentsRequest describeTransitGatewayAttachmentsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTransitGatewayRouteTablesResult describeTransitGatewayRouteTables( - DescribeTransitGatewayRouteTablesRequest describeTransitGatewayRouteTablesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTransitGatewayVpcAttachmentsResult describeTransitGatewayVpcAttachments( - DescribeTransitGatewayVpcAttachmentsRequest describeTransitGatewayVpcAttachmentsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeTransitGatewaysResult describeTransitGateways(DescribeTransitGatewaysRequest describeTransitGatewaysRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ImportSnapshotResult importSnapshot() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSpotFleetRequestsResult describeSpotFleetRequests() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications() - throws 
AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAddressesResult describeAddresses() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeAggregateIdFormatResult describeAggregateIdFormat(DescribeAggregateIdFormatRequest describeAggregateIdFormatRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeKeyPairsResult describeKeyPairs() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeLaunchTemplateVersionsResult describeLaunchTemplateVersions( - DescribeLaunchTemplateVersionsRequest describeLaunchTemplateVersionsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeLaunchTemplatesResult describeLaunchTemplates(DescribeLaunchTemplatesRequest describeLaunchTemplatesRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeInstanceStatusResult describeInstanceStatus() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpnGatewaysResult describeVpnGateways() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointServicesResult describeVpcEndpointServices() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AllocateAddressResult allocateAddress() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public AllocateHostsResult allocateHosts(AllocateHostsRequest allocateHostsRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ApplySecurityGroupsToClientVpnTargetNetworkResult applySecurityGroupsToClientVpnTargetNetwork( - ApplySecurityGroupsToClientVpnTargetNetworkRequest applySecurityGroupsToClientVpnTargetNetworkRequest) { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeSnapshotsResult describeSnapshots() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DescribeVpcEndpointsResult describeVpcEndpoints() throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public DryRunResult dryRun(DryRunSupportedRequest request) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } - @Override public void shutdown() { } - - @Override - public AmazonEC2Waiters waiters() { - throw new UnsupportedOperationException("Not supported in mock"); - } - - @Override - public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) { - throw new UnsupportedOperationException("Not supported in 
mock"); - } - - @Override - public ModifySpotFleetRequestResult modifySpotFleetRequest(ModifySpotFleetRequestRequest modifySpotFleetRequestRequest) - throws AmazonClientException { - throw new UnsupportedOperationException("Not supported in mock"); - } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 7eeadc7f647..bc3993440a3 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -77,10 +77,6 @@ public class AzureBlobStore implements BlobStore { public void close() { } - public boolean containerExist() throws URISyntaxException, StorageException { - return service.doesContainerExist(clientName, container); - } - public boolean blobExists(String blob) throws URISyntaxException, StorageException { return service.blobExists(clientName, container, blob); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 70ab72a232c..3a9c6dd2c3d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -20,11 +20,9 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -34,14 +32,9 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; -import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotCreationException; -import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.threadpool.ThreadPool; -import java.net.URISyntaxException; -import java.util.List; import java.util.Locale; import java.util.function.Function; @@ -140,20 +133,6 @@ public class AzureRepository extends BlobStoreRepository { return chunkSize; } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { - try { - final AzureBlobStore blobStore = (AzureBlobStore) blobStore(); - if (blobStore.containerExist() == false) { - throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. 
Please create it before " - + " creating an azure snapshot repository backed by it."); - } - } catch (URISyntaxException | StorageException e) { - throw new SnapshotCreationException(metadata.name(), snapshotId, e); - } - super.initializeSnapshot(snapshotId, indices, clusterMetadata); - } - @Override public boolean isReadOnly() { return readonly; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 5e4fa772f77..eda159e9338 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -141,12 +141,6 @@ public class AzureStorageService { return prevSettings; } - public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); - } - /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 3df197bc98e..99d75806335 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -55,11 +55,6 @@ public class AzureStorageServiceMock extends AzureStorageService { super(Settings.EMPTY); } - @Override - public boolean doesContainerExist(String account, String container) { - return true; - } - @Override public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index ff1f5bc61ed..f16a82fc558 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -25,39 +25,35 @@ esplugin { } dependencies { - compile 'com.google.cloud:google-cloud-storage:1.59.0' - compile 'com.google.cloud:google-cloud-core:1.59.0' - compile 'com.google.guava:guava:20.0' - compile "joda-time:joda-time:${versions.joda}" - compile 'com.google.http-client:google-http-client:1.24.1' + compile 'com.google.cloud:google-cloud-storage:1.77.0' + compile 'com.google.cloud:google-cloud-core:1.77.0' + compile 'com.google.guava:guava:26.0-jre' + compile 'com.google.http-client:google-http-client:1.30.1' compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile 'com.google.api:api-common:1.7.0' - compile 'com.google.api:gax:1.30.0' + compile 'com.google.api:api-common:1.8.1' + compile 'com.google.api:gax:1.45.0' compile 'org.threeten:threetenbp:1.3.3' - compile 
'com.google.protobuf:protobuf-java-util:3.6.0' - compile 'com.google.protobuf:protobuf-java:3.6.0' + compile 'com.google.protobuf:protobuf-java-util:3.7.1' + compile 'com.google.protobuf:protobuf-java:3.7.1' compile 'com.google.code.gson:gson:2.7' - compile 'com.google.api.grpc:proto-google-common-protos:1.12.0' + compile 'com.google.api.grpc:proto-google-common-protos:1.16.0' compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0' - compile 'com.google.cloud:google-cloud-core-http:1.59.0' - compile 'com.google.auth:google-auth-library-credentials:0.10.0' - compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0' - compile 'com.google.oauth-client:google-oauth-client:1.24.1' - compile 'com.google.api-client:google-api-client:1.24.1' - compile 'com.google.http-client:google-http-client-appengine:1.24.1' - compile 'com.google.http-client:google-http-client-jackson:1.24.1' - compile 'org.codehaus.jackson:jackson-core-asl:1.9.11' - compile 'com.google.http-client:google-http-client-jackson2:1.24.1' - compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile 'com.google.api:gax-httpjson:0.47.0' - compile 'io.opencensus:opencensus-api:0.15.0' + compile 'com.google.cloud:google-cloud-core-http:1.77.0' + compile 'com.google.auth:google-auth-library-credentials:0.16.1' + compile 'com.google.auth:google-auth-library-oauth2-http:0.16.1' + compile 'com.google.oauth-client:google-oauth-client:1.28.0' + compile 'com.google.api-client:google-api-client:1.28.0' + compile 'com.google.http-client:google-http-client-appengine:1.29.2' + compile 'com.google.http-client:google-http-client-jackson2:1.29.2' + compile 'com.google.api:gax-httpjson:0.62.0' compile 'io.grpc:grpc-context:1.12.0' - compile 'io.opencensus:opencensus-contrib-http-util:0.15.0' - compile 'com.google.apis:google-api-services-storage:v1-rev135-1.24.1' + compile 'io.opencensus:opencensus-api:0.18.0' + compile 'io.opencensus:opencensus-contrib-http-util:0.18.0' + compile 'com.google.apis:google-api-services-storage:v1-rev20190426-1.28.0' } dependencyLicenses { @@ -65,7 +61,6 @@ dependencyLicenses { mapping from: /google-auth-.*/, to: 'google-auth' mapping from: /google-http-.*/, to: 'google-http' mapping from: /opencensus.*/, to: 'opencensus' - mapping from: /jackson-.*/, to: 'jackson' mapping from: /http.*/, to: 'httpclient' mapping from: /protobuf.*/, to: 'protobuf' mapping from: /proto-google.*/, to: 'proto-google' @@ -81,6 +76,10 @@ thirdPartyAudit { 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', diff --git a/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 deleted file mode 100644 index 67291b658e5..00000000000 --- a/plugins/repository-gcs/licenses/api-common-1.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea59fb8b2450999345035dec8a6f472543391766 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 new file mode 100644 index 00000000000..7a1c114c6c0 --- /dev/null +++ 
b/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 @@ -0,0 +1 @@ +e89befb19b08ad84b262b2f226ab79aefcaa9d7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 deleted file mode 100644 index d6d2bb20ed8..00000000000 --- a/plugins/repository-gcs/licenses/gax-1.30.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58fa2feb11b092be0a6ebe705a28736f12374230 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.45.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.45.0.jar.sha1 new file mode 100644 index 00000000000..8c0cbc659aa --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.45.0.jar.sha1 @@ -0,0 +1 @@ +2ade3e3502f9d14e3731347a82ea02372094211f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 deleted file mode 100644 index fdc722d1520..00000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.47.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d096f3142eb3adbf877588d1044895d148d9efcb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 new file mode 100644 index 00000000000..161ca85ccfc --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 @@ -0,0 +1 @@ +05a1a4736acd1c4f30304be953532be6aecdc2c9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 deleted file mode 100644 index 27dafe58a01..00000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37de23fb9b8b077de4ecec3192d98e752b0e5d72 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.28.0.jar.sha1 new file mode 100644 index 00000000000..c9b0efd4cdc --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-1.28.0.jar.sha1 @@ -0,0 +1 @@ +8fe155d766ed22480939e3a9db428151e0264d9e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 deleted file mode 100644 index e3042ee6ea0..00000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev135-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28d3d391dfc7e7e7951760708ad2f48cecacf38f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20190426-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20190426-1.28.0.jar.sha1 new file mode 100644 index 00000000000..84c68e6ffd5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20190426-1.28.0.jar.sha1 @@ -0,0 +1 @@ +34dd008901f382507a572f5242d0e5c5ea4ad713 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 deleted file mode 100644 index c8258d69326..00000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f981288bd84fe6d140ed70d1d8dbe994a64fa3cc \ No newline at end of file diff 
--git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.16.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.16.1.jar.sha1 new file mode 100644 index 00000000000..6527ebec6da --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.16.1.jar.sha1 @@ -0,0 +1 @@ +9a15387cc0438ac3f3e625b6050cf39f4e981e13 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 deleted file mode 100644 index f55ef7c9c21..00000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c079a62086121973a23d90f54e2b8c13050fa39d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.16.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.16.1.jar.sha1 new file mode 100644 index 00000000000..e6467acf8f5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.16.1.jar.sha1 @@ -0,0 +1 @@ +3407d434678faef3439a7012efa336e751ddc623 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 deleted file mode 100644 index 20e3b0c782d..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2d0c00917660b244da514f82cba96f7697f2c82 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.77.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.77.0.jar.sha1 new file mode 100644 index 00000000000..d16477c5bd6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.77.0.jar.sha1 @@ -0,0 +1 @@ +7cd83a789fde368a999c1793c6297e7b4e56b2ac \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 deleted file mode 100644 index ab4c7b7dca9..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e2a094ec3e8acb15b99f2d4bd42ac9bbc7d9f33e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.77.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.77.0.jar.sha1 new file mode 100644 index 00000000000..7efc3167589 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.77.0.jar.sha1 @@ -0,0 +1 @@ +e16acbc935a7762ba9b220860ae45c2c67d17d8c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 deleted file mode 100644 index 0f5a8633bd0..00000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23dc0edf739ff1fb5a91fbddd7bd1f2cbfe0f827 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.77.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.77.0.jar.sha1 new file mode 100644 index 00000000000..de15e888520 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.77.0.jar.sha1 @@ -0,0 +1 @@ +e368e1a8bbc0d0a4354f4e5eec076f38f6966050 \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 deleted file mode 100644 index 46b99f23e47..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -396eac8d3fb1332675f82b208f48a469d64f3b4a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.30.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.30.1.jar.sha1 new file mode 100644 index 00000000000..85323c108f9 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.30.1.jar.sha1 @@ -0,0 +1 @@ +573aacbda8feb0d43f7056291fbce5496f42a6aa \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 deleted file mode 100644 index e39f63fe33a..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8535031ae10bf6a196e68f25e10c0d6382699cb6 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.29.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.29.2.jar.sha1 new file mode 100644 index 00000000000..6973b62e928 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.29.2.jar.sha1 @@ -0,0 +1 @@ +d93f4d1d8c2496d75221e53173e4c503b7096a4d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 deleted file mode 100644 index f6b9694abaa..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02c88e77c14effdda76f02a0eac968de74e0bd4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 deleted file mode 100644 index 634b7d9198c..00000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ad1dffd8a450055e68d8004fe003033b751d761 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.29.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.29.2.jar.sha1 new file mode 100644 index 00000000000..5a44d18d2aa --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.29.2.jar.sha1 @@ -0,0 +1 @@ +d67891f5a438e1f339387a09628e0ab0af8b612a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 deleted file mode 100644 index 2d89939674a..00000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.24.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b0e0218b96808868c23a7d0b40566a713931d9f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 new file mode 100644 index 00000000000..474df6e0265 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 @@ -0,0 +1 @@ +9a9e5d0c33b663d6475c96ce79b2949545a113af \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 b/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 deleted file mode 100644 index 7b6ae09060b..00000000000 --- a/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89507701249388e1ed5ddcf8c41f4ce1be7831ef \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 b/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 new file mode 100644 index 00000000000..63d05007650 --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 @@ -0,0 +1 @@ +6a806eff209f36f635f943e16d97491f00f6bfab \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-LICENSE b/plugins/repository-gcs/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49..00000000000 --- a/plugins/repository-gcs/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-gcs/licenses/jackson-NOTICE b/plugins/repository-gcs/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc..00000000000 --- a/plugins/repository-gcs/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. 
diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 deleted file mode 100644 index ed70030899a..00000000000 --- a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e32303ef8bd18a5c9272780d49b81c95e05ddf43 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 deleted file mode 100644 index e200e2e24a7..00000000000 --- a/plugins/repository-gcs/licenses/opencensus-api-0.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a098392b287d7924660837f4eba0ce252013683 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 new file mode 100644 index 00000000000..8b95ab4e4c4 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 @@ -0,0 +1 @@ +b89a8f8dfd1e1e0d68d83c82a855624814b19a6e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 deleted file mode 100644 index b642e1ebebd..00000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d88690591669d9b5ba6d91d9eac7736e58ccf3da \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 new file mode 100644 index 00000000000..1757e005911 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 @@ -0,0 +1 @@ +76a37e4a931d5801a9e25b0c0353e5f37c4d1e8e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 deleted file mode 100644 index 47f3c178a68..00000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1140cc74df039deb044ed0e320035e674dc13062 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 new file mode 100644 index 00000000000..7762b7a3ebd --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 @@ -0,0 +1 @@ +2c5f022ea3b8e8df6a619c4cd8faf9af86022daa \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 deleted file mode 100644 index 050ebd44c92..00000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5333f7e422744d76840c08a106e28e519fbe3acd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.7.1.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.7.1.jar.sha1 new file mode 100644 index 00000000000..51d30a4c185 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.7.1.jar.sha1 @@ -0,0 +1 @@ +0bce1b6dc9e4531169542ab37a1c8641bcaa8afb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 
b/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 deleted file mode 100644 index cc85974499a..00000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-util-3.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3680d0042d4fe0b95ada844ff24da0698a7f0773 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.7.1.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.7.1.jar.sha1 new file mode 100644 index 00000000000..d08f6be735b --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.7.1.jar.sha1 @@ -0,0 +1 @@ +45dc95896cfad26397675fdabef7b032d6db4bb6 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index d2e5b89b040..8adfaeb4273 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -21,7 +21,6 @@ package org.elasticsearch.repositories.gcs; import com.google.api.client.googleapis.GoogleUtils; import com.google.api.client.http.HttpTransport; -import com.google.api.client.http.javanet.DefaultConnectionFactory; import com.google.api.client.http.javanet.NetHttpTransport; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.http.HttpTransportOptions; @@ -37,10 +36,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LazyInitializable; import java.io.IOException; -import java.net.HttpURLConnection; import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -104,10 +100,16 @@ public class GoogleCloudStorageService { * @return a new client storage instance that can be used to manage objects * (blobs) */ - private Storage createClient(final String clientName, final GoogleCloudStorageClientSettings clientSettings) throws IOException { + private static Storage createClient(String clientName, GoogleCloudStorageClientSettings clientSettings) throws IOException { logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, clientSettings.getHost())); - final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> createHttpTransport(clientSettings.getHost())); + final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> { + final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); + // requires java.lang.RuntimePermission "setFactory" + // Pin the TLS trust certificates. + builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); + return builder.build(); + }); final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder() .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) @@ -145,54 +147,6 @@ public class GoogleCloudStorageService { return storageOptionsBuilder.build().getService(); } - /** - * Pins the TLS trust certificates and, more importantly, overrides connection - * URLs in the case of a custom endpoint setting because some connections don't - * fully honor this setting (bugs in the SDK). The default connection factory - * opens a new connection for each request. 
This is required for the storage - * instance to be thread-safe. - **/ - private static HttpTransport createHttpTransport(final String endpoint) throws Exception { - final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); - // requires java.lang.RuntimePermission "setFactory" - builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); - if (Strings.hasLength(endpoint)) { - final URL endpointUrl = URI.create(endpoint).toURL(); - // it is crucial to open a connection for each URL (see {@code - // DefaultConnectionFactory#openConnection}) instead of reusing connections, - // because the storage instance has to be thread-safe as it is cached. - builder.setConnectionFactory(new DefaultConnectionFactory() { - @Override - public HttpURLConnection openConnection(final URL originalUrl) throws IOException { - // test if the URL is built correctly, ie following the `host` setting - if (originalUrl.getHost().equals(endpointUrl.getHost()) && originalUrl.getPort() == endpointUrl.getPort() - && originalUrl.getProtocol().equals(endpointUrl.getProtocol())) { - return super.openConnection(originalUrl); - } - // override connection URLs because some don't follow the config. See - // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3254 and - // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3255 - URI originalUri; - try { - originalUri = originalUrl.toURI(); - } catch (final URISyntaxException e) { - throw new RuntimeException(e); - } - String overridePath = "/"; - if (originalUri.getRawPath() != null) { - overridePath = originalUri.getRawPath(); - } - if (originalUri.getRawQuery() != null) { - overridePath += "?" + originalUri.getRawQuery(); - } - return super.openConnection( - new URL(endpointUrl.getProtocol(), endpointUrl.getHost(), endpointUrl.getPort(), overridePath)); - } - }); - } - return builder.build(); - } - /** * Converts timeout values from the settings to a timeout value for the Google * Cloud SDK diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index eddf2a9f780..ca6ca60e41e 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -281,6 +281,11 @@ class MockStorage implements Storage { return null; } + @Override + public WriteChannel writer(URL signedURL) { + return null; + } + // Everything below this line is not implemented. @Override @@ -288,6 +293,11 @@ class MockStorage implements Storage { return null; } + @Override + public Blob create(BlobInfo blobInfo, byte[] content, int offset, int length, BlobTargetOption... options) { + return null; + } + @Override public Bucket create(BucketInfo bucketInfo, BucketTargetOption... 
options) { return null; diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 6fec01d9b0e..dbdf654a13c 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -28,7 +28,7 @@ esplugin { } versions << [ - 'aws': '1.11.505' + 'aws': '1.11.562' ] dependencies { diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.505.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.505.jar.sha1 deleted file mode 100644 index add5db290e8..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d19328c227b2b5ad81d137361ebc9cbcd0396465 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.562.jar.sha1 new file mode 100644 index 00000000000..ed8ded6a360 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.562.jar.sha1 @@ -0,0 +1 @@ +b5fc47ec1b5afe180f5ebb4eda755acdca7a20ae \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.505.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.505.jar.sha1 deleted file mode 100644 index ab2bfdbc189..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a219919090a6cadd7e119c899c90343ad9c0077 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 new file mode 100644 index 00000000000..65c85dc87b1 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 @@ -0,0 +1 @@ +1fdf4daf1960fe760e7a950dd28a05c5abc12788 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.505.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.505.jar.sha1 deleted file mode 100644 index 04c4cc213c6..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4cf82765b04a579609314ab7f296a9a0ddae1cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 new file mode 100644 index 00000000000..8e852fe9b27 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.562.jar.sha1 @@ -0,0 +1 @@ +1712c878f7e9483ceac1eb2356a9457a3c8df03e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jmespath-java-1.11.505.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.11.505.jar.sha1 deleted file mode 100644 index 803d4fe85d7..00000000000 --- a/plugins/repository-s3/licenses/jmespath-java-1.11.505.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -067234d307b210097e247a49f08875e0cd3f2b95 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jmespath-java-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.11.562.jar.sha1 new file mode 100644 index 00000000000..8e2d0e1935a --- /dev/null +++ b/plugins/repository-s3/licenses/jmespath-java-1.11.562.jar.sha1 @@ -0,0 +1 @@ +1147ed0ad1f2c5a16b8271e38e3cda5cd488c8ae \ No newline at end of file diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 16398b380cf..455a3a8cb46 100644 --- 
a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -50,7 +50,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class DieWithDignityIT extends ESRestTestCase { - + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43413") public void testDieWithDignity() throws Exception { // deleting the PID file prevents stopping the cluster from failing since it occurs if and only if the PID file exists final Path pidFile = PathUtils.get(System.getProperty("pidfile")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yml index f227e076aa9..4806601cec2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.recovery/10_basic.yml @@ -130,3 +130,28 @@ index: [v*] - match: { $body: {} } +--- +"Indices recovery test with detailed parameter": + - skip: + version: " - 7.2.99" + reason: bug with detailed parameter fixed in 7.3 + + - do: + indices.create: + index: test_3 + body: + settings: + index: + number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.recovery: + index: [test_3] + human: true + detailed: true + + - match: { test_3.shards.0.index.files.details: [] } diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 94c4a273159..2bc1d590106 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -38,12 +38,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.IdentityHashMap; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.Queue; import java.util.Set; +import java.util.function.Predicate; import java.util.stream.Collectors; public final class ExceptionsHelper { @@ -185,22 +187,14 @@ public final class ExceptionsHelper { * @return Corruption indicating exception if one is found, otherwise {@code null} */ public static IOException unwrapCorruption(Throwable t) { - if (t != null) { - do { - for (Class clazz : CORRUPTION_EXCEPTIONS) { - if (clazz.isInstance(t)) { - return (IOException) t; - } + return t == null ? null : ExceptionsHelper.unwrapCausesAndSuppressed(t, cause -> { + for (Class clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(cause)) { + return true; } - for (Throwable suppressed : t.getSuppressed()) { - IOException corruptionException = unwrapCorruption(suppressed); - if (corruptionException != null) { - return corruptionException; - } - } - } while ((t = t.getCause()) != null); - } - return null; + } + return false; + }).orElse(null); } /** @@ -213,7 +207,11 @@ public final class ExceptionsHelper { */ public static Throwable unwrap(Throwable t, Class... 
clazzes) { if (t != null) { + final Set seen = Collections.newSetFromMap(new IdentityHashMap<>()); do { + if (seen.add(t) == false) { + return null; + } for (Class clazz : clazzes) { if (clazz.isInstance(t)) { return t; @@ -246,33 +244,22 @@ public final class ExceptionsHelper { return true; } - static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - public static Optional maybeError(final Throwable cause, final Logger logger) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); + @SuppressWarnings("unchecked") + private static Optional unwrapCausesAndSuppressed(Throwable cause, Predicate predicate) { + if (predicate.test(cause)) { + return Optional.of((T) cause); } final Queue queue = new LinkedList<>(); queue.add(cause); - int iterations = 0; + final Set seen = Collections.newSetFromMap(new IdentityHashMap<>()); while (queue.isEmpty() == false) { - iterations++; - // this is a guard against deeply nested or circular chains of exceptions - if (iterations > MAX_ITERATIONS) { - logger.warn("giving up looking for fatal errors", cause); - break; - } final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); + if (seen.add(current) == false) { + continue; + } + if (predicate.test(current)) { + return Optional.of((T) current); } Collections.addAll(queue, current.getSuppressed()); if (current.getCause() != null) { @@ -283,21 +270,24 @@ public final class ExceptionsHelper { } /** - * See {@link #maybeError(Throwable, Logger)}. Uses the class-local logger. + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. + * + * @param cause the root throwable + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable */ public static Optional maybeError(final Throwable cause) { - return maybeError(cause, logger); + return unwrapCausesAndSuppressed(cause, t -> t instanceof Error); } /** * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be * caught and bubbles up to the uncaught exception handler. Note that the cause tree is examined for any {@link Error}. See - * {@link #maybeError(Throwable, Logger)} for the semantics. + * {@link #maybeError(Throwable)} for the semantics. * * @param throwable the throwable to possibly throw on another thread */ public static void maybeDieOnAnotherThread(final Throwable throwable) { - ExceptionsHelper.maybeError(throwable, logger).ifPresent(error -> { + ExceptionsHelper.maybeError(throwable).ifPresent(error -> { /* * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). 
This means that a rethrow here diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 22091648b35..d81eba434e7 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -96,6 +96,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_2 = new Version(6070299, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_6_8_0 = new Version(6080099, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_6_8_1 = new Version(6080199, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final Version V_6_8_2 = new Version(6080299, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_7_0_0 = new Version(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_0_1 = new Version(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_1_0 = new Version(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index c653c264e95..b44d1072181 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -78,6 +78,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { } } + @Override protected void addCustomFields(final XContentBuilder builder, final Params params) throws IOException { super.addCustomFields(builder, params); builder.startObject("indices"); @@ -190,7 +191,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { public static class ShardResult implements Writeable, ToXContentFragment { private final int id; - private final ShardResult.Failure[] failures; + private final Failure[] failures; public ShardResult(final int id, final Failure[] failures) { this.id = id; @@ -199,7 +200,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { ShardResult(final StreamInput in) throws IOException { this.id = in.readVInt(); - this.failures = in.readOptionalArray(Failure::readFailure, ShardResult.Failure[]::new); + this.failures = in.readOptionalArray(Failure::readFailure, Failure[]::new); } @Override @@ -227,9 +228,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { builder.startArray("failures"); if (failures != null) { for (Failure failure : failures) { - builder.startObject(); failure.toXContent(builder, params); - builder.endObject(); } } builder.endArray(); @@ -242,7 +241,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { return Strings.toString(this); } - public static class Failure extends DefaultShardOperationFailedException implements Writeable { + public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; @@ -275,11 +274,11 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { } @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + public XContentBuilder innerToXContent(final XContentBuilder builder, final Params params) throws IOException { if (nodeId != null) { builder.field("node", nodeId); } - return 
super.toXContent(builder, params); + return super.innerToXContent(builder, params); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 22a0777f7bf..79cbab47819 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.close; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; @@ -94,12 +95,12 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA } @Override - protected ReplicaResult shardOperationOnReplica(final ShardRequest shardRequest, final IndexShard replica) { + protected ReplicaResult shardOperationOnReplica(final ShardRequest shardRequest, final IndexShard replica) throws IOException { executeShardOperation(shardRequest, replica); return new ReplicaResult(); } - private void executeShardOperation(final ShardRequest request, final IndexShard indexShard) { + private void executeShardOperation(final ShardRequest request, final IndexShard indexShard) throws IOException { final ShardId shardId = indexShard.shardId(); if (indexShard.getActiveOperationsCount() != IndexShard.OPERATIONS_BLOCKED) { throw new IllegalStateException("Index shard " + shardId + " is not blocking all operations during closing"); @@ -109,9 +110,19 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA if (clusterBlocks.hasIndexBlock(shardId.getIndexName(), request.clusterBlock()) == false) { throw new IllegalStateException("Index shard " + shardId + " must be blocked by " + request.clusterBlock() + " before closing"); } - indexShard.verifyShardBeforeIndexClosing(); - indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); - logger.trace("{} shard is ready for closing", shardId); + if (request.isPhase1()) { + // in order to advance the global checkpoint to the maximum sequence number, the (persisted) local checkpoint needs to be + // advanced first, which, when using async translog syncing, does not automatically hold at the time when we have acquired + // all operation permits. Instead, this requires an explicit sync, which communicates the updated (persisted) local checkpoint + // to the primary (we call this phase1), and phase2 can then use the fact that the global checkpoint has moved to the maximum + // sequence number to pass the verifyShardBeforeIndexClosing check and create a safe commit where the maximum sequence number + // is equal to the global checkpoint.
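A minimal sketch of the phase1/phase2 ordering described in the comment above, assuming hypothetical names (CloseableShard, closeInTwoPhases); the real action issues two separate replicated ShardRequests rather than one sequential call, and operates on an IndexShard under operation permits:

import java.io.IOException;

class TwoPhaseCloseSketch {
    interface CloseableShard {
        void sync() throws IOException;           // phase1: persist the local checkpoint
        void verifyShardBeforeIndexClosing();     // phase2: needs the advanced global checkpoint
        void flush(boolean force, boolean waitIfOngoing);
    }

    static void closeInTwoPhases(CloseableShard shard) throws IOException {
        // phase1: an explicit sync communicates the persisted local checkpoint to the primary,
        // which lets the global checkpoint advance to the maximum sequence number
        shard.sync();
        // phase2: verification now passes, and the flush creates a safe commit whose
        // maximum sequence number equals the global checkpoint
        shard.verifyShardBeforeIndexClosing();
        shard.flush(true, true);
    }
}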
+ indexShard.sync(); + } else { + indexShard.verifyShardBeforeIndexClosing(); + indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + logger.trace("{} shard is ready for closing", shardId); + } } @Override @@ -136,14 +147,22 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA private final ClusterBlock clusterBlock; + private final boolean phase1; + ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + phase1 = in.readBoolean(); + } else { + phase1 = false; + } } - public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final TaskId parentTaskId) { + public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { super(shardId); this.clusterBlock = Objects.requireNonNull(clusterBlock); + this.phase1 = phase1; setParentTask(parentTaskId); } @@ -161,10 +180,17 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeBoolean(phase1); + } } public ClusterBlock clusterBlock() { return clusterBlock; } + + public boolean isPhase1() { + return phase1; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 0e2f9f752af..3f4510c8f24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -267,12 +267,9 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("node", nodeId()); - super.innerToXContent(builder, params); - builder.endObject(); - return builder; + return super.innerToXContent(builder, params); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 04b429bb77f..8475272a5e2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -284,8 +284,7 @@ abstract class AbstractSearchAsyncAction exten return request; } - @Override - public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { + protected final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; @@ -296,6 +295,11 @@ abstract class AbstractSearchAsyncAction exten skippedOps.get(), buildTookInMillis(), failures, clusters); } + @Override + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, String 
scrollId) { + listener.onResponse(buildSearchResponse(internalSearchResponse, scrollId)); + } + @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); @@ -316,11 +320,6 @@ abstract class AbstractSearchAsyncAction exten executor.execute(command); } - @Override - public final void onResponse(SearchResponse response) { - listener.onResponse(response); - } - @Override public final void onFailure(Exception e) { listener.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index afc81b21da4..301cb600452 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -30,7 +30,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.internal.InternalSearchResponse; -import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -65,7 +64,7 @@ final class ExpandSearchPhase extends SearchPhase { } @Override - public void run() throws IOException { + public void run() { if (isCollapseRequest() && searchResponse.hits().getHits().length > 0) { SearchRequest searchRequest = context.getRequest(); CollapseBuilder collapseBuilder = searchRequest.source().collapse(); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 1bbca35cb9a..2115b4fa998 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -22,8 +22,8 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -76,10 +76,10 @@ final class FetchSearchPhase extends SearchPhase { } @Override - public void run() throws IOException { - context.execute(new ActionRunnable(context) { + public void run() { + context.execute(new AbstractRunnable() { @Override - public void doRun() throws IOException { + protected void doRun() throws Exception { // we do the heavy lifting in this inner run method where we reduce aggs etc. that's why we fork this phase // off immediately instead of forking when we send back the response to the user since there we only need // to merge together the fetched results which is a linear operation. 
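As a rough model of the interface narrowing in these hunks, with toy types standing in for ActionListener and InternalSearchResponse (all names below are illustrative): a phase no longer completes a general listener itself; it hands the internal response to the context, which builds and sends the final result.

class SendResponseSketch {
    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    // the narrowed context: a phase can only send the final response or report a failure
    interface PhaseContext {
        void sendSearchResponse(String internalResponse, String scrollId);
        void onFailure(Exception e);
    }

    static PhaseContext contextFor(Listener<String> listener) {
        return new PhaseContext() {
            @Override
            public void sendSearchResponse(String internalResponse, String scrollId) {
                // response construction lives in the context (stand-in for buildSearchResponse)
                listener.onResponse(internalResponse + " scroll_id=" + scrollId);
            }

            @Override
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }
        };
    }
}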
@@ -209,8 +209,8 @@ final class FetchSearchPhase extends SearchPhase { private static SearchPhase sendResponsePhase(InternalSearchResponse response, String scrollId, SearchPhaseContext context) { return new SearchPhase("response") { @Override - public void run() throws IOException { - context.onResponse(context.buildSearchResponse(response, scrollId)); + public void run() { + context.sendSearchResponse(response, scrollId); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index 9829ff6a983..28838defa3e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.search.SearchShardTarget; @@ -32,7 +31,7 @@ import java.util.concurrent.Executor; /** * This class provide contextual state and access to resources across multiple search phases. */ -interface SearchPhaseContext extends ActionListener, Executor { +interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases /** @@ -56,11 +55,16 @@ interface SearchPhaseContext extends ActionListener, Executor { SearchRequest getRequest(); /** - * Builds the final search response that should be send back to the user. + * Builds and sends the final search response back to the user. * @param internalSearchResponse the internal search response * @param scrollId an optional scroll ID if this search is a scroll search */ - SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId); + void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId); + + /** + * Notifies the top-level listener of the provided exception + */ + void onFailure(Exception e); /** * This method will communicate a fatal phase failure back to the user. In contrast to a shard failure @@ -113,5 +117,4 @@ interface SearchPhaseContext extends ActionListener, Executor { * a response is returned to the user indicating that all shards have failed. 
*/ void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase); - } diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 7aa7dfb62a6..aa3e91c634a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -35,7 +36,7 @@ import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public class DefaultShardOperationFailedException extends ShardOperationFailedException { +public class DefaultShardOperationFailedException extends ShardOperationFailedException implements Writeable { private static final String INDEX = "index"; private static final String SHARD_ID = "shard"; @@ -90,13 +91,13 @@ public class DefaultShardOperationFailedException extends ShardOperationFailedEx } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); innerToXContent(builder, params); builder.endObject(); return builder; } - + protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("shard", shardId()); builder.field("index", index()); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index a362502bd36..36f31af27a3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -111,6 +111,7 @@ public class ReplicationOperation< private void handlePrimaryResult(final PrimaryResultT primaryResult) { this.primaryResult = primaryResult; primary.updateLocalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.localCheckpoint()); + primary.updateGlobalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.globalCheckpoint()); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); if (replicaRequest != null) { if (logger.isTraceEnabled()) { @@ -123,7 +124,7 @@ public class ReplicationOperation< // is valid for this replication group. If we would sample in the reverse, the global checkpoint might be based on a subset // of the sampled replication group, and advanced further than what the given replication group would allow it to. // This would entail that some shards could learn about a global checkpoint that would be higher than its local checkpoint. 
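A compact sketch of the computed-versus-persisted distinction the new Primary methods draw (field names below are hypothetical): replication broadcasts the computed value, sampled after the replication group per the comment above, while acknowledgements and safety checks use only what has been durably synced.

class GlobalCheckpointSketch {
    static class Primary {
        volatile long computed = -1;  // derived from the replication group; may lead persistence
        volatile long persisted = -1; // durably synced to the translog; may lag the computed value

        long computedGlobalCheckpoint() { return computed; }
        long globalCheckpoint() { return persisted; }
    }

    static long checkpointToBroadcast(Primary primary) {
        // replicas receive the computed value so their local markers can advance
        return primary.computedGlobalCheckpoint();
    }
}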
- final long globalCheckpoint = primary.globalCheckpoint(); + final long globalCheckpoint = primary.computedGlobalCheckpoint(); // we have to capture the max_seq_no_of_updates after this request was completed on the primary to make sure the value of // max_seq_no_of_updates on replica when this request is executed is at least the value on the primary when it was executed // on. @@ -341,16 +342,23 @@ public class ReplicationOperation< void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint); /** - * Returns the local checkpoint on the primary shard. + * Returns the persisted local checkpoint on the primary shard. * * @return the local checkpoint */ long localCheckpoint(); /** - * Returns the global checkpoint on the primary shard. + * Returns the global checkpoint computed on the primary shard. * - * @return the global checkpoint + * @return the computed global checkpoint + */ + long computedGlobalCheckpoint(); + + /** + * Returns the persisted global checkpoint on the primary shard. + * + * @return the persisted global checkpoint */ long globalCheckpoint(); @@ -419,16 +427,16 @@ public class ReplicationOperation< public interface ReplicaResponse { /** - * The local checkpoint for the shard. + * The persisted local checkpoint for the shard. * - * @return the local checkpoint + * @return the persisted local checkpoint **/ long localCheckpoint(); /** - * The global checkpoint for the shard. + * The persisted global checkpoint for the shard. * - * @return the global checkpoint + * @return the persisted global checkpoint **/ long globalCheckpoint(); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index d981a314716..2aff3c66bba 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -527,7 +527,7 @@ public abstract class TransportReplicationAction< final ReplicaResult replicaResult = shardOperationOnReplica(replicaRequest.getRequest(), replica); releasable.close(); // release shard operation lock before responding to caller final TransportReplicationAction.ReplicaResponse response = - new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint()); + new ReplicaResponse(replica.getLocalCheckpoint(), replica.getLastSyncedGlobalCheckpoint()); replicaResult.respond(new ResponseListener(response)); } catch (final Exception e) { Releasables.closeWhileHandlingException(releasable); // release shard operation lock before responding to caller @@ -893,10 +893,6 @@ public abstract class TransportReplicationAction< operationLock.close(); } - public long getLocalCheckpoint() { - return indexShard.getLocalCheckpoint(); - } - public ShardRouting routingEntry() { return indexShard.routingEntry(); } @@ -944,7 +940,12 @@ public abstract class TransportReplicationAction< @Override public long globalCheckpoint() { - return indexShard.getGlobalCheckpoint(); + return indexShard.getLastSyncedGlobalCheckpoint(); + } + + @Override + public long computedGlobalCheckpoint() { + return indexShard.getLastKnownGlobalCheckpoint(); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c9f40ba6728..b07ba8d09f3 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -589,7 +589,11 @@ public class ShardStateAction { @Override public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + if (e instanceof FailedToCommitClusterStateException || e instanceof NotMasterException) { + logger.debug(() -> new ParameterizedMessage("failure during [{}]", source), e); + } else { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index ef4583e98e5..021752ba4db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -389,11 +389,26 @@ public class MetaDataIndexStateService { } final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), request.taskId()); final TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest = - new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, parentTaskId); + new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, true, parentTaskId); if (request.ackTimeout() != null) { shardRequest.timeout(request.ackTimeout()); } - transportVerifyShardBeforeCloseAction.execute(shardRequest, listener); + transportVerifyShardBeforeCloseAction.execute(shardRequest, new ActionListener() { + @Override + public void onResponse(ReplicationResponse replicationResponse) { + final TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest = + new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, false, parentTaskId); + if (request.ackTimeout() != null) { + shardRequest.timeout(request.ackTimeout()); + } + transportVerifyShardBeforeCloseAction.execute(shardRequest, listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); } } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java index 3ba392822ca..fc2d231bb2f 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java @@ -31,7 +31,7 @@ import java.util.function.LongSupplier; public interface DateMathParser { /** - * Parse a date math expression without timzeone info and rounding down. + * Parse a date math expression without timezone info and rounding down. */ default Instant parse(String text, LongSupplier now) { return parse(text, now, false, (ZoneId) null); @@ -43,8 +43,8 @@ public interface DateMathParser { // exists for backcompat, do not use! @Deprecated - default Instant parse(String text, LongSupplier now, boolean roundUp, DateTimeZone tz) { - return parse(text, now, roundUp, tz == null ? null : ZoneId.of(tz.getID())); + default Instant parse(String text, LongSupplier now, boolean roundUpProperty, DateTimeZone tz) { + return parse(text, now, roundUpProperty, tz == null ? 
null : ZoneId.of(tz.getID())); } /** @@ -65,11 +65,11 @@ public interface DateMathParser { * s second * * - * @param text the input - * @param now a supplier to retrieve the current date in milliseconds, if needed for additions - * @param roundUp should the result be rounded up - * @param tz an optional timezone that should be applied before returning the milliseconds since the epoch - * @return the parsed date as an Instant since the epoch + * @param text the input + * @param now a supplier to retrieve the current date in milliseconds, if needed for additions + * @param roundUpProperty should the result be rounded up with the granularity of the rounding (e.g. now/M) + * @param tz an optional timezone that should be applied before returning the milliseconds since the epoch + * @return the parsed date as an Instant since the epoch */ - Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId tz); + Instant parse(String text, LongSupplier now, boolean roundUpProperty, ZoneId tz); } diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index dc7c195e2fd..78d4f10d87c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -59,7 +59,7 @@ public class JavaDateMathParser implements DateMathParser { } @Override - public Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZone) { + public Instant parse(String text, LongSupplier now, boolean roundUpProperty, ZoneId timeZone) { Instant time; String mathString; if (text.startsWith("now")) { @@ -73,16 +73,16 @@ public class JavaDateMathParser implements DateMathParser { } else { int index = text.indexOf("||"); if (index == -1) { - return parseDateTime(text, timeZone, roundUp); + return parseDateTime(text, timeZone, roundUpProperty); } time = parseDateTime(text.substring(0, index), timeZone, false); mathString = text.substring(index + 2); } - return parseMath(mathString, time, roundUp, timeZone); + return parseMath(mathString, time, roundUpProperty, timeZone); } - private Instant parseMath(final String mathString, final Instant time, final boolean roundUp, + private Instant parseMath(final String mathString, final Instant time, final boolean roundUpProperty, ZoneId timeZone) throws ElasticsearchParseException { if (timeZone == null) { timeZone = ZoneOffset.UTC; @@ -133,78 +133,79 @@ public class JavaDateMathParser implements DateMathParser { case 'y': if (round) { dateTime = dateTime.withDayOfYear(1).with(LocalTime.MIN); + if (roundUpProperty) { + dateTime = dateTime.plusYears(1); + } } else { dateTime = dateTime.plusYears(sign * num); } - if (roundUp) { - dateTime = dateTime.plusYears(1); - } break; case 'M': if (round) { dateTime = dateTime.withDayOfMonth(1).with(LocalTime.MIN); + if (roundUpProperty) { + dateTime = dateTime.plusMonths(1); + } } else { dateTime = dateTime.plusMonths(sign * num); } - if (roundUp) { - dateTime = dateTime.plusMonths(1); - } break; case 'w': if (round) { dateTime = dateTime.with(TemporalAdjusters.previousOrSame(DayOfWeek.MONDAY)).with(LocalTime.MIN); + if (roundUpProperty) { + dateTime = dateTime.plusWeeks(1); + } } else { dateTime = dateTime.plusWeeks(sign * num); } - if (roundUp) { - dateTime = dateTime.plusWeeks(1); - } break; case 'd': if (round) { dateTime = dateTime.with(LocalTime.MIN); + if (roundUpProperty) { + dateTime = dateTime.plusDays(1); + } } else { 
dateTime = dateTime.plusDays(sign * num); } - if (roundUp) { - dateTime = dateTime.plusDays(1); - } break; case 'h': case 'H': if (round) { dateTime = dateTime.withMinute(0).withSecond(0).withNano(0); + if (roundUpProperty) { + dateTime = dateTime.plusHours(1); + } } else { dateTime = dateTime.plusHours(sign * num); } - if (roundUp) { - dateTime = dateTime.plusHours(1); - } break; case 'm': if (round) { dateTime = dateTime.withSecond(0).withNano(0); + if (roundUpProperty) { + dateTime = dateTime.plusMinutes(1); + } } else { dateTime = dateTime.plusMinutes(sign * num); } - if (roundUp) { - dateTime = dateTime.plusMinutes(1); - } break; case 's': if (round) { dateTime = dateTime.withNano(0); + if (roundUpProperty) { + dateTime = dateTime.plusSeconds(1); + } } else { dateTime = dateTime.plusSeconds(sign * num); } - if (roundUp) { - dateTime = dateTime.plusSeconds(1); - } break; default: throw new ElasticsearchParseException("unit [{}] not supported for date math [{}]", unit, mathString); } - if (roundUp) { + if (round && roundUpProperty) { + // subtract 1 millisecond to get the largest inclusive value dateTime = dateTime.minus(1, ChronoField.MILLI_OF_SECOND.getBaseUnit()); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b758868ed30..e21b816aefd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -793,9 +793,9 @@ public abstract class Engine implements Closeable { } /** - * @return the local checkpoint for this Engine + * @return the persisted local checkpoint for this Engine */ - public abstract long getLocalCheckpoint(); + public abstract long getPersistedLocalCheckpoint(); /** * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint @@ -1142,7 +1142,7 @@ public abstract class Engine implements Closeable { */ @SuppressWarnings("finally") private void maybeDie(final String maybeMessage, final Throwable maybeFatal) { - ExceptionsHelper.maybeError(maybeFatal, logger).ifPresent(error -> { + ExceptionsHelper.maybeError(maybeFatal).ifPresent(error -> { try { logger.error(maybeMessage, error); } finally { diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 932a4b48ef8..65a5d4d2cea 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -117,6 +117,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -208,7 +209,14 @@ public class InternalEngine extends Engine { throttle = new IndexThrottle(); try { trimUnsafeCommits(engineConfig); - translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier()); + translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier(), + seqNo -> { + final LocalCheckpointTracker tracker = getLocalCheckpointTracker(); + assert tracker != null || getTranslog().isOpen() == false; + if (tracker != null) { + tracker.markSeqNoAsPersisted(seqNo); + } + }); assert 
translog.getGeneration() != null; this.translog = translog; this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); @@ -245,10 +253,10 @@ public class InternalEngine extends Engine { for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) { this.internalSearcherManager.addListener(listener); } - this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getCheckpoint()); + this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getProcessedCheckpoint()); this.internalSearcherManager.addListener(lastRefreshedCheckpointListener); maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.max(localCheckpointTracker.getMaxSeqNo(), translog.getMaxSeqNo())); - if (softDeleteEnabled && localCheckpointTracker.getCheckpoint() < localCheckpointTracker.getMaxSeqNo()) { + if (softDeleteEnabled && localCheckpointTracker.getPersistedCheckpoint() < localCheckpointTracker.getMaxSeqNo()) { try (Searcher searcher = acquireSearcher("restore_version_map_and_checkpoint_tracker", SearcherScope.INTERNAL)) { restoreVersionMapAndCheckpointTracker(Lucene.wrapAllDocsLive(searcher.getDirectoryReader())); } catch (IOException e) { @@ -370,7 +378,7 @@ public class InternalEngine extends Engine { public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); - final long localCheckpoint = localCheckpointTracker.getCheckpoint(); + final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(localCheckpoint + 1)) { return translogRecoveryRunner.run(this, snapshot); } @@ -381,19 +389,23 @@ public class InternalEngine extends Engine { public int fillSeqNoGaps(long primaryTerm) throws IOException { try (ReleasableLock ignored = writeLock.acquire()) { ensureOpen(); - final long localCheckpoint = localCheckpointTracker.getCheckpoint(); + final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); int numNoOpsAdded = 0; for ( long seqNo = localCheckpoint + 1; seqNo <= maxSeqNo; - seqNo = localCheckpointTracker.getCheckpoint() + 1 /* the local checkpoint might have advanced so we leap-frog */) { + seqNo = localCheckpointTracker.getProcessedCheckpoint() + 1 /* leap-frog the local checkpoint */) { innerNoOp(new NoOp(seqNo, primaryTerm, Operation.Origin.PRIMARY, System.nanoTime(), "filling gaps")); numNoOpsAdded++; - assert seqNo <= localCheckpointTracker.getCheckpoint() - : "local checkpoint did not advance; was [" + seqNo + "], now [" + localCheckpointTracker.getCheckpoint() + "]"; + assert seqNo <= localCheckpointTracker.getProcessedCheckpoint() : + "local checkpoint did not advance; was [" + seqNo + "], now [" + localCheckpointTracker.getProcessedCheckpoint() + "]"; } + syncTranslog(); // to persist noops associated with the advancement of the local checkpoint + assert localCheckpointTracker.getPersistedCheckpoint() == maxSeqNo + : "persisted local checkpoint did not advance to max seq no; is [" + localCheckpointTracker.getPersistedCheckpoint() + + "], max seq no [" + maxSeqNo + "]"; return numNoOpsAdded; } } @@ -471,13 +483,13 @@ public class InternalEngine extends Engine { } private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier 
globalCheckpointSupplier) throws IOException { + LongSupplier globalCheckpointSupplier, LongConsumer persistedSequenceNumberConsumer) throws IOException { final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); final String translogUUID = loadTranslogUUIDFromLastCommit(); // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, - engineConfig.getPrimaryTermSupplier()); + engineConfig.getPrimaryTermSupplier(), persistedSequenceNumberConsumer); } // Package private for testing purposes only @@ -711,7 +723,7 @@ public class InternalEngine extends Engine { } else if (op.seqNo() > docAndSeqNo.seqNo) { status = OpVsLuceneDocStatus.OP_NEWER; } else if (op.seqNo() == docAndSeqNo.seqNo) { - assert localCheckpointTracker.contains(op.seqNo()) || softDeleteEnabled == false : + assert localCheckpointTracker.hasProcessed(op.seqNo()) || softDeleteEnabled == false : "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } else { @@ -914,7 +926,12 @@ public class InternalEngine extends Engine { versionMap.maybePutIndexUnderLock(index.uid().bytes(), new IndexVersionValue(translogLocation, plan.versionForIndexing, index.seqNo(), index.primaryTerm())); } - localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo()); + localCheckpointTracker.markSeqNoAsProcessed(indexResult.getSeqNo()); + if (indexResult.getTranslogLocation() == null) { + // the op is coming from the translog (and is hence persisted already) or it does not have a sequence number + assert index.origin().isFromTranslog() || indexResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO; + localCheckpointTracker.markSeqNoAsPersisted(indexResult.getSeqNo()); + } indexResult.setTook(System.nanoTime() - index.startTime()); indexResult.freeze(); return indexResult; @@ -954,7 +971,7 @@ public class InternalEngine extends Engine { // unlike the primary, replicas don't really care about the creation status of documents // this allows us to ignore the case where a document was found in the live version maps in // a delete state and return false for the created flag in favor of code simplicity - if (index.seqNo() <= localCheckpointTracker.getCheckpoint()){ + if (index.seqNo() <= localCheckpointTracker.getProcessedCheckpoint()){ // the operation seq# is lower than the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already // part of the lucene commit (either from a peer recovery or a local translog) @@ -1267,7 +1284,12 @@ public class InternalEngine extends Engine { final Translog.Location location = translog.add(new Translog.Delete(delete, deleteResult)); deleteResult.setTranslogLocation(location); } - localCheckpointTracker.markSeqNoAsCompleted(deleteResult.getSeqNo()); + localCheckpointTracker.markSeqNoAsProcessed(deleteResult.getSeqNo()); + if (deleteResult.getTranslogLocation() == null) { + // the op is coming from the translog (and is hence persisted already) or does not have a sequence number (version conflict) + assert delete.origin().isFromTranslog() || deleteResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO; + localCheckpointTracker.markSeqNoAsPersisted(deleteResult.getSeqNo()); + } deleteResult.setTook(System.nanoTime() - delete.startTime()); deleteResult.freeze(); } catch
(RuntimeException | IOException e) { @@ -1300,7 +1322,7 @@ public class InternalEngine extends Engine { // this allows us to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity final DeletionStrategy plan; - if (delete.seqNo() <= localCheckpointTracker.getCheckpoint()) { + if (delete.seqNo() <= localCheckpointTracker.getProcessedCheckpoint()) { // the operation seq# is lower than the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already // part of the lucene commit (either from a peer recovery or a local translog) @@ -1474,10 +1496,10 @@ public class InternalEngine extends Engine { try (Releasable ignored = noOpKeyedLock.acquire(seqNo)) { final NoOpResult noOpResult; final Optional preFlightError = preFlightCheckForNoOp(noOp); + Exception failure = null; if (preFlightError.isPresent()) { - noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), preFlightError.get()); + noOpResult = new NoOpResult(getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, preFlightError.get()); } else { - Exception failure = null; markSeqNoAsSeen(noOp.seqNo()); if (softDeleteEnabled) { try { @@ -1510,7 +1532,14 @@ public class InternalEngine extends Engine { noOpResult.setTranslogLocation(location); } } - localCheckpointTracker.markSeqNoAsCompleted(seqNo); + localCheckpointTracker.markSeqNoAsProcessed(noOpResult.getSeqNo()); + if (noOpResult.getTranslogLocation() == null) { + // the op is coming from the translog (and is hence persisted already) or it does not have a sequence number, or we failed + // to add a tombstone doc to Lucene with a non-fatal error, which would be very surprising + // TODO: always fail the engine in the last case, as this creates gaps in the history + assert noOp.origin().isFromTranslog() || noOpResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO || failure != null; + localCheckpointTracker.markSeqNoAsPersisted(noOpResult.getSeqNo()); + } noOpResult.setTook(System.nanoTime() - noOp.startTime()); noOpResult.freeze(); return noOpResult; @@ -1540,7 +1569,7 @@ public class InternalEngine extends Engine { // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager. - final long localCheckpointBeforeRefresh = getLocalCheckpoint(); + final long localCheckpointBeforeRefresh = localCheckpointTracker.getProcessedCheckpoint(); boolean refreshed; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); @@ -1682,9 +1711,9 @@ public class InternalEngine extends Engine { * This method is to maintain translog only, thus IndexWriter#hasUncommittedChanges condition is not considered.
*/ final long translogGenerationOfNewCommit = - translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration; + translog.getMinGenerationForSeqNo(localCheckpointTracker.getProcessedCheckpoint() + 1).translogFileGeneration; return translogGenerationOfLastCommit < translogGenerationOfNewCommit - || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo(); + || localCheckpointTracker.getProcessedCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } @Override @@ -1871,7 +1900,7 @@ public class InternalEngine extends Engine { */ final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis(); - versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint()); + versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getProcessedCheckpoint()); lastDeleteVersionPruneTimeMSec = timeMSec; } @@ -2361,7 +2390,7 @@ public class InternalEngine extends Engine { protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { ensureCanFlush(); try { - final long localCheckpoint = localCheckpointTracker.getCheckpoint(); + final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1); final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration); final String translogUUID = translogGeneration.translogUUID; @@ -2452,7 +2481,6 @@ public class InternalEngine extends Engine { return mergeScheduler.stats(); } - // Used only for testing! 
Package private to prevent anyone else from using it LocalCheckpointTracker getLocalCheckpointTracker() { return localCheckpointTracker; } @@ -2462,9 +2490,13 @@ public class InternalEngine extends Engine { return getTranslog().getLastSyncedGlobalCheckpoint(); } + public long getProcessedLocalCheckpoint() { + return localCheckpointTracker.getProcessedCheckpoint(); + } + @Override - public long getLocalCheckpoint() { - return localCheckpointTracker.getCheckpoint(); + public long getPersistedLocalCheckpoint() { + return localCheckpointTracker.getPersistedCheckpoint(); } /** @@ -2487,7 +2519,7 @@ public class InternalEngine extends Engine { assert versionMap.assertKeyedLockHeldByCurrentThread(op.uid().bytes()); } } - return localCheckpointTracker.contains(op.seqNo()); + return localCheckpointTracker.hasProcessed(op.seqNo()); } @Override @@ -2577,7 +2609,7 @@ public class InternalEngine extends Engine { @Override public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { - final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); + final long currentLocalCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); // avoid scanning translog if not necessary if (startingSeqNo > currentLocalCheckpoint) { return true; @@ -2587,11 +2619,11 @@ public class InternalEngine extends Engine { Translog.Operation operation; while ((operation = snapshot.next()) != null) { if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - tracker.markSeqNoAsCompleted(operation.seqNo()); + tracker.markSeqNoAsProcessed(operation.seqNo()); } } } - return tracker.getCheckpoint() >= currentLocalCheckpoint; + return tracker.getProcessedCheckpoint() >= currentLocalCheckpoint; } /** @@ -2707,7 +2739,7 @@ public class InternalEngine extends Engine { @Override public void beforeRefresh() { // all changes until this point should be visible after refresh - pendingCheckpoint = localCheckpointTracker.getCheckpoint(); + pendingCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); } @Override @@ -2768,7 +2800,7 @@ public class InternalEngine extends Engine { // Operations can be processed on a replica in a different order than on the primary. If the order on the primary is index-1, // delete-2, index-3, and the order on a replica is index-1, index-3, delete-2, then the msu of index-3 on the replica is 2 // even though it is an update (overwrites index-1). We should relax this assertion if there is a pending gap in the seq_no. 
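To make the reordering scenario in the comment above concrete, here is a small test-style sketch (illustrative only, not code from this change) against the tracker API introduced in this diff; it assumes the two-argument (maxSeqNo, localCheckpoint) constructor shown in LocalCheckpointTracker below:

LocalCheckpointTracker tracker =
    new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
tracker.markSeqNoAsProcessed(0);
tracker.markSeqNoAsProcessed(1);
tracker.markSeqNoAsProcessed(3);              // processed out of order, like index-3 overtaking delete-2
assert tracker.getProcessedCheckpoint() == 1; // seq_no 2 is a pending gap, so the checkpoint stalls
tracker.markSeqNoAsProcessed(2);              // the delayed operation finally arrives
assert tracker.getProcessedCheckpoint() == 3; // the checkpoint leaps over the filled gap

While the gap at seq_no 2 is pending, an operation with seq_no 3 can legitimately exceed the processed checkpoint, which is exactly the case the relaxed assertion below allows.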
- if (relaxIfGapInSeqNo && getLocalCheckpoint() < maxSeqNoOfUpdates) { + if (relaxIfGapInSeqNo && localCheckpointTracker.getProcessedCheckpoint() < maxSeqNoOfUpdates) { return true; } assert seqNo <= maxSeqNoOfUpdates : "id=" + id + " seq_no=" + seqNo + " msu=" + maxSeqNoOfUpdates; @@ -2792,7 +2824,7 @@ public class InternalEngine extends Engine { private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryReader) throws IOException { final IndexSearcher searcher = new IndexSearcher(directoryReader); searcher.setQueryCache(null); - final Query query = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getLocalCheckpoint() + 1, Long.MAX_VALUE); + final Query query = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getPersistedLocalCheckpoint() + 1, Long.MAX_VALUE); final Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1.0f); for (LeafReaderContext leaf : directoryReader.leaves()) { final Scorer scorer = weight.scorer(leaf); @@ -2809,7 +2841,8 @@ public class InternalEngine extends Engine { continue; // skip children docs which do not have primary term } final long seqNo = dv.docSeqNo(docId); - localCheckpointTracker.markSeqNoAsCompleted(seqNo); + localCheckpointTracker.markSeqNoAsProcessed(seqNo); + localCheckpointTracker.markSeqNoAsPersisted(seqNo); idFieldVisitor.reset(); leaf.reader().document(docId, idFieldVisitor); if (idFieldVisitor.getId() == null) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 60777958880..e70bbedf089 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -329,7 +329,7 @@ public class ReadOnlyEngine extends Engine { } @Override - public long getLocalCheckpoint() { + public long getPersistedLocalCheckpoint() { return seqNoStats.getLocalCheckpoint(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 29f22d8dc2c..8a738208508 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -33,6 +33,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.AbstractXContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.FieldNamesFieldMapper.FieldNamesFieldType; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -46,6 +48,7 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.HashMap; import java.util.Objects; import java.util.stream.StreamSupport; @@ -276,14 +279,33 @@ public abstract class FieldMapper extends Mapper implements Cloneable { context.doc().add(field); } } catch (Exception e) { - throw new MapperParsingException("failed to parse field [{}] of type [{}] in document with id '{}'", e, fieldType().name(), - fieldType().typeName(), context.sourceToParse().id()); + String valuePreview = ""; + try { + XContentParser parser = context.parser(); + Object complexValue = 
AbstractXContentParser.readValue(parser, HashMap::new, parser.currentToken()); + if (complexValue == null) { + valuePreview = "null"; + } else { + valuePreview = complexValue.toString(); + } + } catch (Exception innerException) { + throw new MapperParsingException("failed to parse field [{}] of type [{}] in document with id '{}'. " + + "Could not parse field value preview.", + e, fieldType().name(), fieldType().typeName(), context.sourceToParse().id()); + } + + throw new MapperParsingException("failed to parse field [{}] of type [{}] in document with id '{}'. " + + "Preview of field's value: '{}'", e, fieldType().name(), fieldType().typeName(), + context.sourceToParse().id(), valuePreview); } multiFields.parse(this, context); } /** * Parse the field value and populate fields. + * + * Implementations of this method should ensure that, when parsing fails, parser.currentToken() is left on the + * failing token */ protected abstract void parseCreateField(ParseContext context, List fields) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 3aa501819b6..60fe95fedf5 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -19,25 +19,46 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.script.Script; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +import java.util.Map; + import static java.util.Collections.emptyMap; +import static java.util.Objects.requireNonNull; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; import static org.elasticsearch.index.VersionType.INTERNAL; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; /** * Request to reindex some documents from one index to another. This implements CompositeIndicesRequest but in a misleading way.
Rather than @@ -335,4 +356,175 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest PARSER = new ObjectParser<>("reindex"); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in reindex requests is deprecated."; + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ReindexRequest.class)); + + static { + ObjectParser.Parser sourceParser = (parser, request, context) -> { + // Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote. + Map source = parser.map(); + String[] indices = extractStringArray(source, "index"); + if (indices != null) { + request.getSearchRequest().indices(indices); + } + String[] types = extractStringArray(source, "type"); + if (types != null) { + deprecationLogger.deprecatedAndMaybeLog("reindex_with_types", TYPES_DEPRECATION_MESSAGE); + request.getSearchRequest().types(types); + } + request.setRemoteInfo(buildRemoteInfo(source)); + XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); + builder.map(source); + try (InputStream stream = BytesReference.bytes(builder).streamInput(); + XContentParser innerParser = parser.contentType().xContent() + .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), stream)) { + request.getSearchRequest().source().parseXContent(innerParser, false); + } + }; + + ObjectParser destParser = new ObjectParser<>("dest"); + destParser.declareString(IndexRequest::index, new ParseField("index")); + destParser.declareString((request, type) -> { + deprecationLogger.deprecatedAndMaybeLog("reindex_with_types", TYPES_DEPRECATION_MESSAGE); + request.type(type); + }, new ParseField("type")); + destParser.declareString(IndexRequest::routing, new ParseField("routing")); + destParser.declareString(IndexRequest::opType, new ParseField("op_type")); + destParser.declareString(IndexRequest::setPipeline, new ParseField("pipeline")); + destParser.declareString((s, i) -> s.versionType(VersionType.fromString(i)), new ParseField("version_type")); + + PARSER.declareField(sourceParser::parse, new ParseField("source"), ObjectParser.ValueType.OBJECT); + PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), c), new ParseField("dest"), ObjectParser.ValueType.OBJECT); + PARSER.declareInt(ReindexRequest::setMaxDocsValidateIdentical, new ParseField("max_docs", "size")); + PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p)), new ParseField("script"), + ObjectParser.ValueType.OBJECT); + PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts")); + } + + public static ReindexRequest fromXContent(XContentParser parser) throws IOException { + ReindexRequest reindexRequest = new ReindexRequest(); + PARSER.parse(parser, reindexRequest, null); + return reindexRequest; + } + + /** + * Yank a string array from a map. Emulates XContent's permissive String to + * String array conversions. 
+ */ + private static String[] extractStringArray(Map source, String name) { + Object value = source.remove(name); + if (value == null) { + return null; + } + if (value instanceof List) { + @SuppressWarnings("unchecked") + List list = (List) value; + return list.toArray(new String[list.size()]); + } else if (value instanceof String) { + return new String[] {(String) value}; + } else { + throw new IllegalArgumentException("Expected [" + name + "] to be a list of strings or a string but was [" + value + ']'); + } + } + + static RemoteInfo buildRemoteInfo(Map source) throws IOException { + @SuppressWarnings("unchecked") + Map remote = (Map) source.remove("remote"); + if (remote == null) { + return null; + } + String username = extractString(remote, "username"); + String password = extractString(remote, "password"); + String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be specified to reindex from a remote cluster"); + URI uri; + try { + uri = new URI(hostInRequest); + // URI has less stringent URL parsing than our code. We want to fail if any of the required values is missing. + if (uri.getPort() == -1) { + throw new URISyntaxException(hostInRequest, "The port was not defined in the [host]"); + } + } catch (URISyntaxException ex) { + throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [" + + hostInRequest + "]", ex); + } + + String scheme = uri.getScheme(); + String host = uri.getHost(); + int port = uri.getPort(); + + String pathPrefix = null; + if (uri.getPath().isEmpty() == false) { + pathPrefix = uri.getPath(); + } + + Map headers = extractStringStringMap(remote, "headers"); + TimeValue socketTimeout = extractTimeValue(remote, "socket_timeout", RemoteInfo.DEFAULT_SOCKET_TIMEOUT); + TimeValue connectTimeout = extractTimeValue(remote, "connect_timeout", RemoteInfo.DEFAULT_CONNECT_TIMEOUT); + if (false == remote.isEmpty()) { + throw new IllegalArgumentException( + "Unsupported fields in [remote]: [" + Strings.collectionToCommaDelimitedString(remote.keySet()) + "]"); + } + return new RemoteInfo(scheme, host, port, pathPrefix, queryForRemote(source), + username, password, headers, socketTimeout, connectTimeout); + } + + private static String extractString(Map source, String name) { + Object value = source.remove(name); + if (value == null) { + return null; + } + if (value instanceof String) { + return (String) value; + } + throw new IllegalArgumentException("Expected [" + name + "] to be a string but was [" + value + "]"); + } + + private static Map extractStringStringMap(Map source, String name) { + Object value = source.remove(name); + if (value == null) { + return emptyMap(); + } + if (false == value instanceof Map) { + throw new IllegalArgumentException("Expected [" + name + "] to be an object containing strings but was [" + value + "]"); + } + Map map = (Map) value; + for (Map.Entry entry : map.entrySet()) { + if (false == entry.getKey() instanceof String || false == entry.getValue() instanceof String) { + throw new IllegalArgumentException("Expected [" + name + "] to be an object containing strings but has [" + entry + "]"); + } + } + @SuppressWarnings("unchecked") // We just checked.... + Map safe = (Map) map; + return safe; + } + + private static TimeValue extractTimeValue(Map source, String name, TimeValue defaultValue) { + String string = extractString(source, name); + return string == null ?
defaultValue : parseTimeValue(string, name); + } + + private static BytesReference queryForRemote(Map source) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + Object query = source.remove("query"); + if (query == null) { + return BytesReference.bytes(matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS)); + } + if (!(query instanceof Map)) { + throw new IllegalArgumentException("Expected [query] to be an object but was [" + query + "]"); + } + @SuppressWarnings("unchecked") + Map map = (Map) query; + return BytesReference.bytes(builder.map(map)); + } + + static void setMaxDocsValidateIdentical(AbstractBulkByScrollRequest request, int maxDocs) { + if (request.getMaxDocs() != AbstractBulkByScrollRequest.MAX_DOCS_ALL_MATCHES && request.getMaxDocs() != maxDocs) { + throw new IllegalArgumentException("[max_docs] set to two different values [" + request.getMaxDocs() + "]" + + " and [" + maxDocs + "]"); + } else { + request.setMaxDocs(maxDocs); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index d67cbc833d6..70e34623a41 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -118,7 +118,7 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction< private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint()) { indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index a19d9ac4abb..185d3b2ad25 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -22,6 +22,8 @@ package org.elasticsearch.index.seqno; import com.carrotsearch.hppc.LongObjectHashMap; import org.elasticsearch.common.SuppressForbidden; +import java.util.concurrent.atomic.AtomicLong; + /** * This class generates sequence numbers and keeps track of the so-called "local checkpoint" which is the highest number for which all * previous sequence numbers have been processed (inclusive). @@ -35,20 +37,31 @@ public class LocalCheckpointTracker { static final short BIT_SET_SIZE = 1024; /** - * A collection of bit sets representing pending sequence numbers. Each sequence number is mapped to a bit set by dividing by the + * A collection of bit sets representing processed sequence numbers. Each sequence number is mapped to a bit set by dividing by the * bit set size. */ final LongObjectHashMap processedSeqNo = new LongObjectHashMap<>(); /** - * The current local checkpoint, i.e., all sequence numbers no more than this number have been completed. + * A collection of bit sets representing durably persisted sequence numbers. Each sequence number is mapped to a bit set by dividing by + * the bit set size.
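As a quick worked example of the mapping just described (illustrative only, using the BIT_SET_SIZE of 1024 declared above and the arithmetic of getBitSetKey and seqNoToBitSetOffset further down in this class):

long seqNo = 2050;
long bitSetKey = seqNo / BIT_SET_SIZE;              // 2050 / 1024 = 2: the bit set covering seq_nos 2048..3071
int offset = Math.toIntExact(seqNo % BIT_SET_SIZE); // 2050 % 1024 = 2: the third bit within that set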
*/ - volatile long checkpoint; + final LongObjectHashMap persistedSeqNo = new LongObjectHashMap<>(); + + /** + * The current local checkpoint, i.e., all sequence numbers no more than this number have been processed. + */ + final AtomicLong processedCheckpoint = new AtomicLong(); + + /** + * The current persisted local checkpoint, i.e., all sequence numbers no more than this number have been durably persisted. + */ + final AtomicLong persistedCheckpoint = new AtomicLong(); /** * The next available sequence number. */ - private volatile long nextSeqNo; + final AtomicLong nextSeqNo = new AtomicLong(); /** * Initialize the local checkpoint service. The {@code maxSeqNo} should be set to the last sequence number assigned, or @@ -68,8 +81,9 @@ public class LocalCheckpointTracker { throw new IllegalArgumentException( "max seq. no. must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] but was [" + maxSeqNo + "]"); } - nextSeqNo = maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED ? 0 : maxSeqNo + 1; - checkpoint = localCheckpoint; + nextSeqNo.set(maxSeqNo + 1); + processedCheckpoint.set(localCheckpoint); + persistedCheckpoint.set(localCheckpoint); } /** @@ -77,48 +91,67 @@ public class LocalCheckpointTracker { * * @return the next assigned sequence number */ - public synchronized long generateSeqNo() { - return nextSeqNo++; + public long generateSeqNo() { + return nextSeqNo.getAndIncrement(); } /** * Marks the provided sequence number as seen and updates the max_seq_no if needed. */ - public synchronized void advanceMaxSeqNo(long seqNo) { - if (seqNo >= nextSeqNo) { - nextSeqNo = seqNo + 1; - } + public void advanceMaxSeqNo(final long seqNo) { + nextSeqNo.accumulateAndGet(seqNo + 1, Math::max); } /** - * Marks the processing of the provided sequence number as completed as updates the checkpoint if possible. + * Marks the provided sequence number as processed and updates the processed checkpoint if possible. * - * @param seqNo the sequence number to mark as completed + * @param seqNo the sequence number to mark as processed */ - public synchronized void markSeqNoAsCompleted(final long seqNo) { + public synchronized void markSeqNoAsProcessed(final long seqNo) { + markSeqNo(seqNo, processedCheckpoint, processedSeqNo); + } + + /** + * Marks the provided sequence number as persisted and updates the checkpoint if possible. + * + * @param seqNo the sequence number to mark as persisted + */ + public synchronized void markSeqNoAsPersisted(final long seqNo) { + markSeqNo(seqNo, persistedCheckpoint, persistedSeqNo); + } + + private void markSeqNo(final long seqNo, final AtomicLong checkPoint, final LongObjectHashMap bitSetMap) { + assert Thread.holdsLock(this); // make sure we track highest seen sequence number - if (seqNo >= nextSeqNo) { - nextSeqNo = seqNo + 1; - } - if (seqNo <= checkpoint) { + advanceMaxSeqNo(seqNo); + if (seqNo <= checkPoint.get()) { // this is possible during recovery where we might replay an operation that was also replicated return; } - final CountedBitSet bitSet = getBitSetForSeqNo(seqNo); + final CountedBitSet bitSet = getBitSetForSeqNo(bitSetMap, seqNo); final int offset = seqNoToBitSetOffset(seqNo); bitSet.set(offset); - if (seqNo == checkpoint + 1) { - updateCheckpoint(); + if (seqNo == checkPoint.get() + 1) { + updateCheckpoint(checkPoint, bitSetMap); } } /** - * The current checkpoint which can be advanced by {@link #markSeqNoAsCompleted(long)}. + * The current checkpoint which can be advanced by {@link #markSeqNoAsProcessed(long)}. 
* * @return the current checkpoint */ - public long getCheckpoint() { - return checkpoint; + public long getProcessedCheckpoint() { + return processedCheckpoint.get(); + } + + /** + * The current persisted checkpoint which can be advanced by {@link #markSeqNoAsPersisted(long)}. + * + * @return the current persisted checkpoint + */ + public long getPersistedCheckpoint() { + return persistedCheckpoint.get(); } /** @@ -127,17 +160,17 @@ public class LocalCheckpointTracker { * @return the maximum sequence number */ public long getMaxSeqNo() { - return nextSeqNo - 1; + return nextSeqNo.get() - 1; } /** * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint * - * This is needed to make sure the local checkpoint and max seq no are consistent + * This is needed to make sure the persisted local checkpoint and max seq no are consistent */ public synchronized SeqNoStats getStats(final long globalCheckpoint) { - return new SeqNoStats(getMaxSeqNo(), getCheckpoint(), globalCheckpoint); + return new SeqNoStats(getMaxSeqNo(), getPersistedCheckpoint(), globalCheckpoint); } /** @@ -147,70 +180,74 @@ public class LocalCheckpointTracker { * @throws InterruptedException if the thread was interrupted while blocking on the condition */ @SuppressForbidden(reason = "Object#wait") - public synchronized void waitForOpsToComplete(final long seqNo) throws InterruptedException { - while (checkpoint < seqNo) { + public synchronized void waitForProcessedOpsToComplete(final long seqNo) throws InterruptedException { + while (processedCheckpoint.get() < seqNo) { // notified by updateCheckpoint this.wait(); } } /** - * Checks if the given sequence number was marked as completed in this tracker. + * Checks if the given sequence number was marked as processed in this tracker. */ - public boolean contains(final long seqNo) { + public boolean hasProcessed(final long seqNo) { assert seqNo >= 0 : "invalid seq_no=" + seqNo; - if (seqNo >= nextSeqNo) { + if (seqNo >= nextSeqNo.get()) { return false; } - if (seqNo <= checkpoint) { + if (seqNo <= processedCheckpoint.get()) { return true; } final long bitSetKey = getBitSetKey(seqNo); final int bitSetOffset = seqNoToBitSetOffset(seqNo); synchronized (this) { + // check again under lock + if (seqNo <= processedCheckpoint.get()) { + return true; + } final CountedBitSet bitSet = processedSeqNo.get(bitSetKey); return bitSet != null && bitSet.get(bitSetOffset); } } /** - * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number following the - * current checkpoint is processed. + * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number + * following the current checkpoint is processed. 
*/ @SuppressForbidden(reason = "Object#notifyAll") - private void updateCheckpoint() { + private void updateCheckpoint(AtomicLong checkPoint, LongObjectHashMap bitSetMap) { assert Thread.holdsLock(this); - assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1)) : + assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(seqNoToBitSetOffset(checkPoint.get() + 1)) : "updateCheckpoint is called but the bit following the checkpoint is not set"; try { // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words - long bitSetKey = getBitSetKey(checkpoint); - CountedBitSet current = processedSeqNo.get(bitSetKey); + long bitSetKey = getBitSetKey(checkPoint.get()); + CountedBitSet current = bitSetMap.get(bitSetKey); if (current == null) { // the bit set corresponding to the checkpoint has already been removed, set ourselves up for the next bit set - assert checkpoint % BIT_SET_SIZE == BIT_SET_SIZE - 1; - current = processedSeqNo.get(++bitSetKey); + assert checkPoint.get() % BIT_SET_SIZE == BIT_SET_SIZE - 1; + current = bitSetMap.get(++bitSetKey); } do { - checkpoint++; + checkPoint.incrementAndGet(); /* * The checkpoint always falls in the current bit set or we have already cleaned it; if it falls on the last bit of the * current bit set, we can clean it. */ - if (checkpoint == lastSeqNoInBitSet(bitSetKey)) { + if (checkPoint.get() == lastSeqNoInBitSet(bitSetKey)) { assert current != null; - final CountedBitSet removed = processedSeqNo.remove(bitSetKey); + final CountedBitSet removed = bitSetMap.remove(bitSetKey); assert removed == current; - current = processedSeqNo.get(++bitSetKey); + current = bitSetMap.get(++bitSetKey); } - } while (current != null && current.get(seqNoToBitSetOffset(checkpoint + 1))); + } while (current != null && current.get(seqNoToBitSetOffset(checkPoint.get() + 1))); } finally { - // notifies waiters in waitForOpsToComplete + // notifies waiters in waitForProcessedOpsToComplete this.notifyAll(); } } - private long lastSeqNoInBitSet(final long bitSetKey) { + private static long lastSeqNoInBitSet(final long bitSetKey) { return (1 + bitSetKey) * BIT_SET_SIZE - 1; } @@ -220,32 +257,32 @@ public class LocalCheckpointTracker { * @param seqNo the sequence number to obtain the bit set for * @return the bit set corresponding to the provided sequence number */ - private long getBitSetKey(final long seqNo) { + private static long getBitSetKey(final long seqNo) { return seqNo / BIT_SET_SIZE; } - private CountedBitSet getBitSetForSeqNo(final long seqNo) { + private CountedBitSet getBitSetForSeqNo(final LongObjectHashMap bitSetMap, final long seqNo) { assert Thread.holdsLock(this); final long bitSetKey = getBitSetKey(seqNo); - final int index = processedSeqNo.indexOf(bitSetKey); + final int index = bitSetMap.indexOf(bitSetKey); final CountedBitSet bitSet; - if (processedSeqNo.indexExists(index)) { - bitSet = processedSeqNo.indexGet(index); + if (bitSetMap.indexExists(index)) { + bitSet = bitSetMap.indexGet(index); } else { bitSet = new CountedBitSet(BIT_SET_SIZE); - processedSeqNo.indexInsert(index, bitSetKey, bitSet); + bitSetMap.indexInsert(index, bitSetKey, bitSet); } return bitSet; } /** * Obtain the position in the bit set corresponding to the provided sequence number. The bit set corresponding to the sequence number - * can be obtained via {@link #getBitSetForSeqNo(long)}. + * can be obtained via {@link #getBitSetForSeqNo(LongObjectHashMap, long)}. 
* * @param seqNo the sequence number to obtain the position for * @return the position in the bit set corresponding to the provided sequence number */ - private int seqNoToBitSetOffset(final long seqNo) { + private static int seqNoToBitSetOffset(final long seqNo) { return Math.toIntExact(seqNo % BIT_SET_SIZE); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 6c01c721c34..d3bcdd0cd3c 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -146,9 +146,15 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L final Map checkpoints; /** - * A callback invoked when the global checkpoint is updated. For primary mode this occurs if the computed global checkpoint advances on - * the basis of state changes tracked here. For non-primary mode this occurs if the local knowledge of the global checkpoint advances - * due to an update from the primary. + * The current in-memory global checkpoint. In primary mode, this is a cached version of the checkpoint computed from the local + * checkpoints. In replica mode, this is the in-memory global checkpoint that's communicated by the primary. + */ + volatile long globalCheckpoint; + + /** + * A callback invoked when the in-memory global checkpoint is updated. For primary mode this occurs if the computed global checkpoint + * advances on the basis of state changes tracked here. For non-primary mode this occurs if the local knowledge of the global checkpoint + * advances due to an update from the primary. */ private final LongConsumer onGlobalCheckpointUpdated; @@ -393,13 +399,13 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L public static class CheckpointState implements Writeable { /** - * the last local checkpoint information that we have for this shard + * the last local checkpoint information that we have for this shard. All operations up to this point are properly fsynced to disk. */ long localCheckpoint; /** - * the last global checkpoint information that we have for this shard. This information is computed for the primary if - * the tracker is in primary mode and received from the primary if in replica mode. + * the last global checkpoint information that we have for this shard. This is the global checkpoint that's fsynced to disk on the + * respective shard, and all operations up to this point are properly fsynced to disk as well. */ long globalCheckpoint; /** @@ -494,9 +500,9 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L } /** - * Get the local knowledge of the global checkpoints for all in-sync allocation IDs. + * Get the local knowledge of the persisted global checkpoints for all in-sync allocation IDs. * - * @return a map from allocation ID to the local knowledge of the global checkpoint for that allocation ID + * @return a map from allocation ID to the local knowledge of the persisted global checkpoint for that allocation ID */ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { assert primaryMode; @@ -549,20 +555,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * as a logical operator, many of the invariants are written in the form (!A || B); they should be read as (A implies B).
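To make the (!A || B) convention concrete, here is one of the invariants below written out as the implication it encodes (an illustrative gloss, not new code):

// "!handoffInProgress || primaryMode" reads as "handoffInProgress implies primaryMode":
// a relocation handoff can only be in progress while the tracker is in primary mode.
assert !handoffInProgress || primaryMode;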
*/ private boolean invariant() { - assert checkpoints.get(shardAllocationId) != null : - "checkpoints map should always have an entry for the current shard"; - // local checkpoints only set during primary mode assert primaryMode || checkpoints.values().stream().allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); - // global checkpoints for other shards only set during primary mode - assert primaryMode - || checkpoints - .entrySet() - .stream() - .filter(e -> e.getKey().equals(shardAllocationId) == false) - .map(Map.Entry::getValue) - .allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); + // global checkpoints only set during primary mode + assert primaryMode || checkpoints.values().stream().allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -591,14 +588,14 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L // the computed global checkpoint is always up-to-date assert !primaryMode - || getGlobalCheckpoint() == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + || globalCheckpoint == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) : "global checkpoint is not up-to-date, expected: " + - computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + " but was: " + getGlobalCheckpoint(); + computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) + " but was: " + globalCheckpoint; // when in primary mode, the global checkpoint is at most the minimum local checkpoint on all in-sync shard copies assert !primaryMode - || getGlobalCheckpoint() <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) - : "global checkpoint [" + getGlobalCheckpoint() + "] " + || globalCheckpoint <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) + : "global checkpoint [" + globalCheckpoint + "] " + "for primary mode allocation ID [" + shardAllocationId + "] " + "more than in-sync local checkpoints [" + checkpoints + "]"; @@ -672,8 +669,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L this.operationPrimaryTerm = operationPrimaryTerm; this.handoffInProgress = false; this.appliedClusterStateVersion = -1L; + this.globalCheckpoint = globalCheckpoint; this.checkpoints = new HashMap<>(1 + indexSettings.getNumberOfReplicas()); - checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false, false)); this.onGlobalCheckpointUpdated = Objects.requireNonNull(onGlobalCheckpointUpdated); this.currentTimeMillisSupplier = Objects.requireNonNull(currentTimeMillisSupplier); this.onSyncRetentionLeases = Objects.requireNonNull(onSyncRetentionLeases); @@ -700,28 +697,26 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L } /** - * Returns the global checkpoint for the shard. + * Returns the in-memory global checkpoint for the shard. 
* * @return the global checkpoint */ - public synchronized long getGlobalCheckpoint() { - final CheckpointState cps = checkpoints.get(shardAllocationId); - assert cps != null; - return cps.globalCheckpoint; + public long getGlobalCheckpoint() { + return globalCheckpoint; } @Override public long getAsLong() { - return getGlobalCheckpoint(); + return globalCheckpoint; } /** * Updates the global checkpoint on a replica shard after it has been updated by the primary. * - * @param globalCheckpoint the global checkpoint - * @param reason the reason the global checkpoint was updated + * @param newGlobalCheckpoint the new global checkpoint + * @param reason the reason the global checkpoint was updated */ - public synchronized void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final String reason) { + public synchronized void updateGlobalCheckpointOnReplica(final long newGlobalCheckpoint, final String reason) { assert invariant(); assert primaryMode == false; /* @@ -730,18 +725,17 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than the sync from the lagging * primary. */ - updateGlobalCheckpoint( - shardAllocationId, - globalCheckpoint, - current -> { - logger.trace("updated global checkpoint from [{}] to [{}] due to [{}]", current, globalCheckpoint, reason); - onGlobalCheckpointUpdated.accept(globalCheckpoint); - }); + final long previousGlobalCheckpoint = globalCheckpoint; + if (newGlobalCheckpoint > previousGlobalCheckpoint) { + globalCheckpoint = newGlobalCheckpoint; + logger.trace("updated global checkpoint from [{}] to [{}] due to [{}]", previousGlobalCheckpoint, globalCheckpoint, reason); + onGlobalCheckpointUpdated.accept(globalCheckpoint); + } assert invariant(); } /** - * Update the local knowledge of the global checkpoint for the specified allocation ID. + * Update the local knowledge of the persisted global checkpoint for the specified allocation ID. 
* * @param allocationId the allocation ID to update the global checkpoint for * @param globalCheckpoint the global checkpoint @@ -750,24 +744,15 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L assert primaryMode; assert handoffInProgress == false; assert invariant(); - updateGlobalCheckpoint( - allocationId, - globalCheckpoint, - current -> logger.trace( - "updated local knowledge for [{}] on the primary of the global checkpoint from [{}] to [{}]", - allocationId, - current, - globalCheckpoint)); - assert invariant(); - } - - private void updateGlobalCheckpoint(final String allocationId, final long globalCheckpoint, LongConsumer ifUpdated) { final CheckpointState cps = checkpoints.get(allocationId); assert !this.shardAllocationId.equals(allocationId) || cps != null; if (cps != null && globalCheckpoint > cps.globalCheckpoint) { + final long previousGlobalCheckpoint = cps.globalCheckpoint; cps.globalCheckpoint = globalCheckpoint; - ifUpdated.accept(cps.globalCheckpoint); + logger.trace("updated local knowledge for [{}] on the primary of the global checkpoint from [{}] to [{}]", + allocationId, previousGlobalCheckpoint, globalCheckpoint); } + assert invariant(); } /** @@ -824,23 +809,14 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L } } else { for (String initializingId : initializingAllocationIds) { - if (shardAllocationId.equals(initializingId) == false) { - final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); - } + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); } for (String inSyncId : inSyncAllocationIds) { - if (shardAllocationId.equals(inSyncId)) { - // current shard is initially marked as not in-sync because we don't know better at that point - CheckpointState checkpointState = checkpoints.get(shardAllocationId); - checkpointState.inSync = true; - checkpointState.tracked = true; - } else { - final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - final long globalCheckpoint = localCheckpoint; - checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); - } + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); } } appliedClusterStateVersion = applyingClusterStateVersion; @@ -1000,13 +976,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L */ private synchronized void updateGlobalCheckpointOnPrimary() { assert primaryMode; - final CheckpointState cps = checkpoints.get(shardAllocationId); - final long globalCheckpoint = cps.globalCheckpoint; final long computedGlobalCheckpoint = computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()); assert computedGlobalCheckpoint >= globalCheckpoint : "new global checkpoint [" + computedGlobalCheckpoint + "] is lower than previous one [" + globalCheckpoint + "]"; if (globalCheckpoint != computedGlobalCheckpoint) { - cps.globalCheckpoint = computedGlobalCheckpoint; + globalCheckpoint = computedGlobalCheckpoint; logger.trace("updated global checkpoint to [{}]", computedGlobalCheckpoint); 
onGlobalCheckpointUpdated.accept(computedGlobalCheckpoint); } @@ -1056,13 +1030,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L primaryMode = false; handoffInProgress = false; relocated = true; - // forget all checkpoint information except for global checkpoint of current shard + // forget all checkpoint information checkpoints.forEach((key, cps) -> { cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - if (key.equals(shardAllocationId) == false) { - // don't throw global checkpoint information of current shard away - cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; }); assert invariant(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java index a56f8670c23..e1b992643fa 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java @@ -58,7 +58,7 @@ public class SeqNoStats implements ToXContentFragment, Writeable { return maxSeqNo; } - /** the maximum sequence number for which all previous operations (including) have been completed */ + /** the maximum sequence number for which all previous operations (inclusive) have been persisted */ public long getLocalCheckpoint() { return localCheckpoint; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index fdd95614756..2372353ef85 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -540,7 +540,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl */ engine.rollTranslogGeneration(); engine.fillSeqNoGaps(newPrimaryTerm); - replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint()); + replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), + getLocalCheckpoint()); primaryReplicaSyncer.accept(this, new ActionListener() { @Override public void onResponse(ResyncTask resyncTask) { @@ -1865,7 +1866,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Update the local knowledge of the global checkpoint for the specified allocation ID. + * Update the local knowledge of the persisted global checkpoint for the specified allocation ID. * * @param allocationId the allocation ID to update the global checkpoint for * @param globalCheckpoint the global checkpoint @@ -2079,12 +2080,12 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Returns the local checkpoint for the shard. + * Returns the persisted local checkpoint for the shard.
* * @return the local checkpoint */ public long getLocalCheckpoint() { - return getEngine().getLocalCheckpoint(); + return getEngine().getPersistedLocalCheckpoint(); } /** @@ -2092,7 +2093,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl * * @return the global checkpoint */ - public long getGlobalCheckpoint() { + public long getLastKnownGlobalCheckpoint() { return replicationTracker.getGlobalCheckpoint(); } @@ -2125,15 +2126,19 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return; } assert assertPrimaryMode(); - // only sync if there are not operations in flight + // only sync if there are no operations in flight, or when using async durability final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint()); - if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint()) { + final boolean asyncDurability = indexSettings().getTranslogDurability() == Translog.Durability.ASYNC; + if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) { final ObjectLongMap globalCheckpoints = getInSyncGlobalCheckpoints(); - final String allocationId = routingEntry().allocationId().getId(); - assert globalCheckpoints.containsKey(allocationId); - final long globalCheckpoint = globalCheckpoints.get(allocationId); + final long globalCheckpoint = replicationTracker.getGlobalCheckpoint(); + // async durability means that the local checkpoint might lag (as it is only advanced on fsync) + // periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global + // checkpoint can be synced final boolean syncNeeded = - StreamSupport + (asyncDurability && stats.getGlobalCheckpoint() < stats.getMaxSeqNo()) + // check if the persisted global checkpoint of any shard copy is lagging + || StreamSupport .stream(globalCheckpoints.values().spliterator(), false) .anyMatch(v -> v.value < globalCheckpoint); // only sync if index is not closed and there is a shard lagging the primary @@ -2192,7 +2197,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl assert shardRouting.primary() && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " + shardRouting; assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && - getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); + getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()) + .getLocalCheckpoint() || indexSettings().getTranslogDurability() == Translog.Durability.ASYNC; synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } @@ -2733,7 +2739,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl bumpPrimaryTerm(opPrimaryTerm, () -> { updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); - final long currentGlobalCheckpoint = getGlobalCheckpoint(); + final long currentGlobalCheckpoint = getLastKnownGlobalCheckpoint(); final long maxSeqNo = seqNoStats().getMaxSeqNo(); logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); @@ -3103,7 +3109,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl flush(new FlushRequest().waitIfOngoing(true));
SetOnce newEngineReference = new SetOnce<>(); - final long globalCheckpoint = getGlobalCheckpoint(); + final long globalCheckpoint = getLastKnownGlobalCheckpoint(); + assert globalCheckpoint == getLastSyncedGlobalCheckpoint(); synchronized (mutex) { verifyNotClosed(); // we must create both new read-only engine and new read-write engine under mutex to ensure snapshotStoreMetadata, diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 07aade95292..17ef424185d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -84,7 +84,7 @@ public class PrimaryReplicaSyncer { public void resync(final IndexShard indexShard, final ActionListener listener) { Translog.Snapshot snapshot = null; try { - final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1; + final long startingSeqNo = indexShard.getLastKnownGlobalCheckpoint() + 1; final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); final ShardId shardId = indexShard.shardId(); // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 4d23ce8cdc0..5e97834b7e7 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -64,6 +64,7 @@ import java.util.OptionalLong; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -130,6 +131,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final LongSupplier primaryTermSupplier; private final String translogUUID; private final TranslogDeletionPolicy deletionPolicy; + private final LongConsumer persistedSequenceNumberConsumer; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is @@ -147,14 +149,18 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside * that a new generation is rolled when the term is increased. This guarantee allows us to validate * and reject operations whose term is higher than the primary term stored in the translog header. + * @param persistedSequenceNumberConsumer a callback that's called whenever an operation with a given sequence number is successfully + * persisted.
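A minimal sketch (my gloss, mirroring the openTranslog change in InternalEngine above; not code from this diff) of how this callback is meant to be wired: the engine hands the translog a consumer that feeds durably persisted sequence numbers back into the LocalCheckpointTracker, so the persisted local checkpoint can advance once an operation's translog entry has been fsynced:

LocalCheckpointTracker tracker =
    new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
LongConsumer persistedSequenceNumberConsumer = tracker::markSeqNoAsPersisted;
persistedSequenceNumberConsumer.accept(0);    // invoked by the translog after the op's bytes are fsynced
assert tracker.getPersistedCheckpoint() == 0;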
*/ public Translog( final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy, - final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier) throws IOException { + final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; this.globalCheckpointSupplier = globalCheckpointSupplier; this.primaryTermSupplier = primaryTermSupplier; + this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer; this.deletionPolicy = deletionPolicy; this.translogUUID = translogUUID; bigArrays = config.getBigArrays(); @@ -191,7 +197,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC boolean success = false; current = null; try { - current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); + current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint, + persistedSequenceNumberConsumer); success = true; } finally { // we have to close all the recovered ones otherwise we leak file handles here @@ -479,7 +486,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * @throws IOException if creating the translog failed */ TranslogWriter createWriter(long fileGeneration) throws IOException { - final TranslogWriter writer = createWriter(fileGeneration, getMinFileGeneration(), globalCheckpointSupplier.getAsLong()); + final TranslogWriter writer = createWriter(fileGeneration, getMinFileGeneration(), globalCheckpointSupplier.getAsLong(), + persistedSequenceNumberConsumer); assert writer.sizeInBytes() == DEFAULT_HEADER_SIZE_IN_BYTES : "Mismatch translog header size; " + "empty translog size [" + writer.sizeInBytes() + ", header size [" + DEFAULT_HEADER_SIZE_IN_BYTES + "]"; return writer; @@ -494,7 +502,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * With no readers and no current, a call to {@link #getMinFileGeneration()} would not work. * @param initialGlobalCheckpoint the global checkpoint to be written in the first checkpoint. 
*/ - TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException { + TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint, + LongConsumer persistedSequenceNumberConsumer) throws IOException { final TranslogWriter newFile; try { newFile = TranslogWriter.create( @@ -505,7 +514,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy, + persistedSequenceNumberConsumer); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -1876,7 +1886,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, - new TragicExceptionHolder()); + new TragicExceptionHolder(), seqNo -> { throw new UnsupportedOperationException(); }); writer.close(); return translogUUID; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 6b00b0c5db3..0695a2bf650 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.translog; +import com.carrotsearch.hppc.LongArrayList; +import com.carrotsearch.hppc.procedures.LongProcedure; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Assertions; @@ -42,6 +44,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; public class TranslogWriter extends BaseTranslogReader implements Closeable { @@ -64,10 +67,15 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { private final LongSupplier globalCheckpointSupplier; private final LongSupplier minTranslogGenerationSupplier; + // callback that's called whenever an operation with a given sequence number is successfully persisted. 
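The callback documented above is the bridge between the translog and the persisted local checkpoint: sequence numbers are buffered as operations are appended and are only reported to the consumer once the fsync covering them succeeds. A minimal self-contained sketch of that pattern (class and method names are hypothetical; the PR itself buffers into hppc's LongArrayList to avoid boxing):

import java.util.ArrayList;
import java.util.List;
import java.util.function.LongConsumer;

final class PersistedSeqNoNotifier {
    private final LongConsumer persistedSeqNoConsumer;
    private List<Long> nonFsynced = new ArrayList<>();

    PersistedSeqNoNotifier(LongConsumer persistedSeqNoConsumer) {
        this.persistedSeqNoConsumer = persistedSeqNoConsumer;
    }

    synchronized void onAppend(long seqNo) {
        nonFsynced.add(seqNo); // buffered until the next successful fsync
    }

    void onFsyncSuccess() {
        final List<Long> flushed;
        synchronized (this) { // swap under lock, notify outside it, as syncUpTo does further down
            flushed = nonFsynced;
            nonFsynced = new ArrayList<>();
        }
        flushed.forEach(persistedSeqNoConsumer::accept);
    }
}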
+ private final LongConsumer persistedSequenceNumberConsumer; + protected final AtomicBoolean closed = new AtomicBoolean(false); // lock order synchronized(syncLock) -> synchronized(this) private final Object syncLock = new Object(); + private LongArrayList nonFsyncedSequenceNumbers; + private final Map> seenSequenceNumbers; private TranslogWriter( @@ -78,7 +86,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { final Path path, final ByteSizeValue bufferSize, final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header, - TragicExceptionHolder tragedy) + TragicExceptionHolder tragedy, + final LongConsumer persistedSequenceNumberConsumer) throws IOException { super(initialCheckpoint.generation, channel, path, header); @@ -97,6 +106,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { this.maxSeqNo = initialCheckpoint.maxSeqNo; assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.nonFsyncedSequenceNumbers = new LongArrayList(64); + this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer; this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null; this.tragedy = tragedy; } @@ -104,7 +115,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, - final long primaryTerm, TragicExceptionHolder tragedy) + final long primaryTerm, TragicExceptionHolder tragedy, LongConsumer persistedSequenceNumberConsumer) throws IOException { final FileChannel channel = channelFactory.open(file); try { @@ -125,7 +136,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy, persistedSequenceNumberConsumer); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. 
We only apply this logic to the checkpoint.generation+1 any other file with a higher generation @@ -177,6 +188,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { minSeqNo = SequenceNumbers.min(minSeqNo, seqNo); maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo); + nonFsyncedSequenceNumbers.add(seqNo); + operationCounter++; assert assertNoSeqNumberConflict(seqNo, data); @@ -338,7 +351,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { * @return true if this call caused an actual sync operation */ public boolean syncUpTo(long offset) throws IOException { + boolean synced = false; if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { + LongArrayList flushedSequenceNumbers = null; synchronized (syncLock) { // only one sync/checkpoint should happen concurrently but we wait if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { // double checked locking - we don't want to fsync unless we have to and now that we have @@ -349,6 +364,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { try { outputStream.flush(); checkpointToSync = getCheckpoint(); + flushedSequenceNumbers = nonFsyncedSequenceNumbers; + nonFsyncedSequenceNumbers = new LongArrayList(64); } catch (final Exception ex) { closeWithTragicEvent(ex); throw ex; @@ -366,11 +383,14 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { assert lastSyncedCheckpoint.offset <= checkpointToSync.offset : "illegal state: " + lastSyncedCheckpoint.offset + " <= " + checkpointToSync.offset; lastSyncedCheckpoint = checkpointToSync; // write protected by syncLock - return true; + synced = true; } } + if (flushedSequenceNumbers != null) { + flushedSequenceNumbers.forEach((LongProcedure) persistedSequenceNumberConsumer::accept); + } } - return false; + return synced; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 01a7836d813..7cf165a5b11 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -181,7 +181,7 @@ public class TruncateTranslogAction { new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), indexSettings.getTranslogRetentionAge().getMillis()); try (Translog translog = new Translog(translogConfig, translogUUID, - translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm); + translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshot()) { //noinspection StatementWithEmptyBody we are just checking that we can iterate through the whole snapshot while (snapshot.next() != null) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index a8e420613ae..ff5ea0b2930 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -177,7 +177,7 @@ public class RecoverySourceHandler { startingSeqNo = 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); - sendFileResult = phase1(phase1Snapshot.getIndexCommit(), shard.getGlobalCheckpoint(), 
() -> estimateNumOps); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -644,7 +644,7 @@ public class RecoverySourceHandler { */ runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint), shardId + " marking " + request.targetAllocationId() + " as in sync", shard, cancellableThreads, logger); - final long globalCheckpoint = shard.getGlobalCheckpoint(); + final long globalCheckpoint = shard.getLastKnownGlobalCheckpoint(); // this global checkpoint is persisted in finalizeRecovery final StepListener<Void> finalizeListener = new StepListener<>(); cancellableThreads.executeIO(() -> recoveryTarget.finalizeRecovery(globalCheckpoint, finalizeListener)); finalizeListener.whenComplete(r -> { @@ -712,7 +712,8 @@ public class RecoverySourceHandler { final BytesArray content = new BytesArray(buffer, 0, bytesRead); final boolean lastChunk = position + content.length() == md.length(); final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - cancellableThreads.execute(() -> requestSeqIdTracker.waitForOpsToComplete(requestSeqId - maxConcurrentFileChunks)); + cancellableThreads.execute( + () -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - maxConcurrentFileChunks)); cancellableThreads.checkForCancel(); if (error.get() != null) { break; } @@ -721,10 +722,10 @@ cancellableThreads.executeIO(() -> recoveryTarget.writeFileChunk(md, requestFilePosition, content, lastChunk, translogOps.get(), ActionListener.wrap( - r -> requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId), + r -> requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId), e -> { error.compareAndSet(null, Tuple.tuple(md, e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } ))); position += content.length(); @@ -737,7 +738,7 @@ // When we terminate exceptionally, we don't wait for the outstanding requests as we don't use their results anyway. // This allows us to end quickly and eliminate the complexity of handling requestSeqIds in case of error.
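The chunk-sending loop above applies back-pressure by giving each outgoing chunk a locally generated sequence id and waiting until the request issued maxConcurrentFileChunks earlier has been marked as processed; failures are marked processed too, so the pipeline cannot stall on an error. A reduced sketch of the same bound using a plain Semaphore in place of the sequence number tracker (hypothetical names, not the PR's implementation):

import java.util.concurrent.Semaphore;

final class FileChunkThrottle {
    private final Semaphore inFlight;

    FileChunkThrottle(int maxConcurrentFileChunks) {
        this.inFlight = new Semaphore(maxConcurrentFileChunks);
    }

    void beforeSend() throws InterruptedException {
        inFlight.acquire(); // blocks once maxConcurrentFileChunks requests are outstanding
    }

    void onResponseOrFailure() {
        inFlight.release(); // released on failure as well, mirroring markSeqNoAsProcessed above
    }
}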
if (error.get() == null) { - cancellableThreads.execute(() -> requestSeqIdTracker.waitForOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); + cancellableThreads.execute(() -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); } if (error.get() != null) { handleErrorOnSendFiles(store, error.get().v1(), error.get().v2()); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 430697c686e..e78819087ac 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -929,7 +929,7 @@ public class RecoveryState implements ToXContentFragment, Streamable, Writeable builder.field(Fields.REUSED, reusedFileCount()); builder.field(Fields.RECOVERED, recoveredFileCount()); builder.field(Fields.PERCENT, String.format(Locale.ROOT, "%1.1f%%", recoveredFilesPercent())); - if (params.paramAsBoolean("details", false)) { + if (params.paramAsBoolean("detailed", false)) { builder.startArray(Fields.DETAILS); for (File file : fileDetails.values()) { file.toXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 230f5351575..ec3c22d42a1 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -70,7 +70,6 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { .withTimeout(recoverySettings.internalActionLongTimeout()) .build(); this.fileChunkRequestOptions = TransportRequestOptions.builder() - // we are saving the cpu for other things .withType(TransportRequestOptions.Type.RECOVERY) .withTimeout(recoverySettings.internalActionTimeout()) .build(); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index dbd9cca5271..ccf8876318f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -233,7 +233,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl remote = new RemoteClusterConnection(settings, clusterAlias, seedList, transportService, numRemoteConnections, getNodePredicate(settings), proxyAddress, connectionProfile); remoteClusters.put(clusterAlias, remote); - } else if (connectionProfileChanged(remote.getConnectionManager().getConnectionProfile(), connectionProfile)) { + } else if (connectionProfileChanged(remote.getConnectionManager().getConnectionProfile(), connectionProfile) + || seedsChanged(remote.getSeedNodes(), seedList)) { // New ConnectionProfile. 
Must tear down existing connection try { IOUtils.close(remote); @@ -472,6 +473,16 @@ public final class RemoteClusterService extends RemoteClusterAware implements Closeable { || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false; } + private boolean seedsChanged(final List<Tuple<String, Supplier<DiscoveryNode>>> oldSeedNodes, + final List<Tuple<String, Supplier<DiscoveryNode>>> newSeedNodes) { + if (oldSeedNodes.size() != newSeedNodes.size()) { + return true; + } + Set<String> oldSeeds = oldSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + Set<String> newSeeds = newSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + return oldSeeds.equals(newSeeds) == false; + } + /** * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} * function on success. diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 2de2f259e6f..3b5d1ad43da 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -35,9 +35,9 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.RemoteClusterAware; +import java.io.IOException; import java.util.Optional; -import static org.elasticsearch.ExceptionsHelper.MAX_ITERATIONS; import static org.elasticsearch.ExceptionsHelper.maybeError; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -81,20 +81,14 @@ public class ExceptionsHelperTests extends ESTestCase { if (fatal) { assertError(cause, error); } else { - assertFalse(maybeError(cause, logger).isPresent()); + assertFalse(maybeError(cause).isPresent()); } - assertFalse(maybeError(new Exception(new DecoderException()), logger).isPresent()); - - Throwable chain = outOfMemoryError; - for (int i = 0; i < MAX_ITERATIONS; i++) { - chain = new Exception(chain); - } - assertFalse(maybeError(chain, logger).isPresent()); + assertFalse(maybeError(new Exception(new DecoderException())).isPresent()); } private void assertError(final Throwable cause, final Error error) { - final Optional<Error> maybeError = maybeError(cause, logger); + final Optional<Error> maybeError = maybeError(cause); assertTrue(maybeError.isPresent()); assertThat(maybeError.get(), equalTo(error)); } @@ -211,4 +205,29 @@ public class ExceptionsHelperTests extends ESTestCase { withSuppressedException.addSuppressed(new RuntimeException()); assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); } + + public void testSuppressedCycle() { + RuntimeException e1 = new RuntimeException(); + RuntimeException e2 = new RuntimeException(); + e1.addSuppressed(e2); + e2.addSuppressed(e1); + ExceptionsHelper.unwrapCorruption(e1); + + final CorruptIndexException corruptIndexException = new CorruptIndexException("corrupt", "resource"); + RuntimeException e3 = new RuntimeException(corruptIndexException); + e3.addSuppressed(e1); + assertThat(ExceptionsHelper.unwrapCorruption(e3), equalTo(corruptIndexException)); + + RuntimeException e4 = new RuntimeException(e1); + e4.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(e4), equalTo(corruptIndexException)); + } + + public void testCauseCycle() { + RuntimeException e1 = new RuntimeException(); + RuntimeException e2 = new RuntimeException(e1); + e1.initCause(e2); + ExceptionsHelper.unwrap(e1, IOException.class); + ExceptionsHelper.unwrapCorruption(e1); + } } diff
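The two tests just added, testSuppressedCycle and testCauseCycle, pin down that unwrap and unwrapCorruption terminate even when cause or suppressed links form a cycle. The usual guard for such traversals is an identity set of visited throwables; a sketch of the idea (illustrative only, not ExceptionsHelper's actual implementation):

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

final class CycleSafeUnwrap {
    // Walks the cause chain, stopping as soon as a throwable is seen twice.
    static <T extends Throwable> T unwrap(Throwable t, Class<T> type) {
        final Set<Throwable> seen = Collections.newSetFromMap(new IdentityHashMap<>());
        while (t != null && seen.add(t)) {
            if (type.isInstance(t)) {
                return type.cast(t);
            }
            t = t.getCause();
        }
        return null; // not found, or the chain cycled
    }
}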
--git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index 08516d3a882..c1e31e422bf 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -22,9 +22,14 @@ package org.elasticsearch.action.admin.indices.close; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; @@ -32,8 +37,10 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.transport.ActionNotFoundTransportException; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; @@ -46,6 +53,38 @@ import static org.hamcrest.Matchers.nullValue; public class CloseIndexResponseTests extends ESTestCase { + /** + * Test that random responses can be written to xcontent without errors. + * Also check some specific simple cases for output. 
+ */ + public void testToXContent() throws IOException { + CloseIndexResponse response = randomResponse(); + XContentType xContentType = randomFrom(XContentType.values()); + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + + Index index = new Index("test", "uuid"); + IndexResult indexResult = new CloseIndexResponse.IndexResult(index); + CloseIndexResponse closeIndexResponse = new CloseIndexResponse(true, true, + Collections.singletonList(indexResult)); + assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":true,\"indices\":{\"test\":{\"closed\":true}}}", + Strings.toString(closeIndexResponse)); + + CloseIndexResponse.ShardResult[] shards = new CloseIndexResponse.ShardResult[1]; + shards[0] = new CloseIndexResponse.ShardResult(0, new CloseIndexResponse.ShardResult.Failure[] { + new CloseIndexResponse.ShardResult.Failure("test", 0, new ActionNotFoundTransportException("test"), "nodeId") }); + indexResult = new CloseIndexResponse.IndexResult(index, shards); + closeIndexResponse = new CloseIndexResponse(true, true, + Collections.singletonList(indexResult)); + assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":true," + + "\"indices\":{\"test\":{\"closed\":false,\"failedShards\":{\"0\":{" + + "\"failures\":[{\"node\":\"nodeId\",\"shard\":0,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\"," + + "\"reason\":{\"type\":\"action_not_found_transport_exception\"," + + "\"reason\":\"No handler for action [test]\"}}]}}}}}", + Strings.toString(closeIndexResponse)); + } + public void testSerialization() throws Exception { final CloseIndexResponse response = randomResponse(); try (BytesStreamOutput out = new BytesStreamOutput()) { @@ -131,7 +170,10 @@ public class CloseIndexResponseTests extends ESTestCase { acknowledged = false; failures = new CloseIndexResponse.ShardResult.Failure[randomIntBetween(1, 3)]; for (int j = 0; j < failures.length; j++) { - String nodeId = randomAlphaOfLength(5); + String nodeId = null; + if (frequently()) { + nodeId = randomAlphaOfLength(5); + } failures[j] = new CloseIndexResponse.ShardResult.Failure(indexName, i, randomException(index, i), nodeId); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index cd4d8ae6857..e6891eb3b03 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -136,9 +136,13 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { } private void executeOnPrimaryOrReplica() throws Throwable { + executeOnPrimaryOrReplica(false); + } + + private void executeOnPrimaryOrReplica(boolean phase1) throws Throwable { final TaskId taskId = new TaskId("_node_id", randomNonNegativeLong()); final TransportVerifyShardBeforeCloseAction.ShardRequest request = - new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, taskId); + new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, phase1, taskId); final PlainActionFuture res = PlainActionFuture.newFuture(); action.shardOperationOnPrimary(request, indexShard, ActionListener.wrap( r -> { @@ -165,6 +169,11 @@ public class 
TransportVerifyShardBeforeCloseActionTests extends ESTestCase { assertThat(flushRequest.getValue().force(), is(true)); } + public void testShardIsSynced() throws Throwable { + executeOnPrimaryOrReplica(true); + verify(indexShard, times(1)).sync(); + } + public void testOperationFailsWhenNotBlocked() { when(indexShard.getActiveOperationsCount()).thenReturn(randomIntBetween(0, 10)); @@ -227,7 +236,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { final PlainActionFuture listener = new PlainActionFuture<>(); TaskId taskId = new TaskId(clusterService.localNode().getId(), 0L); TransportVerifyShardBeforeCloseAction.ShardRequest request = - new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, clusterBlock, taskId); + new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, clusterBlock, false, taskId); ReplicationOperation.Replicas proxy = action.newReplicasProxy(); ReplicationOperation operation = new ReplicationOperation<>( @@ -268,53 +277,56 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase { TransportVerifyShardBeforeCloseAction.ShardRequest, PrimaryResult> createPrimary(final ShardRouting primary, final ReplicationGroup replicationGroup) { - return new ReplicationOperation.Primary< - TransportVerifyShardBeforeCloseAction.ShardRequest, - TransportVerifyShardBeforeCloseAction.ShardRequest, - PrimaryResult>() { - @Override - public ShardRouting routingEntry() { - return primary; - } + return new ReplicationOperation.Primary() { + @Override + public ShardRouting routingEntry() { + return primary; + } - @Override - public ReplicationGroup getReplicationGroup() { - return replicationGroup; - } + @Override + public ReplicationGroup getReplicationGroup() { + return replicationGroup; + } - @Override - public void perform( - TransportVerifyShardBeforeCloseAction.ShardRequest request, ActionListener listener) { - listener.onResponse(new PrimaryResult(request)); - } + @Override + public void perform( + TransportVerifyShardBeforeCloseAction.ShardRequest request, ActionListener listener) { + listener.onResponse(new PrimaryResult(request)); + } - @Override - public void failShard(String message, Exception exception) { + @Override + public void failShard(String message, Exception exception) { - } + } - @Override - public void updateLocalCheckpointForShard(String allocationId, long checkpoint) { - } + @Override + public void updateLocalCheckpointForShard(String allocationId, long checkpoint) { + } - @Override - public void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint) { - } + @Override + public void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint) { + } - @Override - public long localCheckpoint() { - return 0; - } + @Override + public long localCheckpoint() { + return 0; + } - @Override - public long globalCheckpoint() { - return 0; - } + @Override + public long computedGlobalCheckpoint() { + return 0; + } - @Override - public long maxSeqNoOfUpdatesOrDeletes() { - return 0; - } + @Override + public long globalCheckpoint() { + return 0; + } + + @Override + public long maxSeqNoOfUpdatesOrDeletes() { + return 0; + } }; } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java index 900f50a9be0..9104ae78810 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java @@ -23,8 
+23,13 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -41,6 +46,18 @@ public class BulkRejectionIT extends ESIntegTestCase { .build(); } + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class); + } + + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + @Override protected int numberOfReplicas() { return 1; diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index d9de69a1c6c..f3ff6f7acf4 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -40,7 +40,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -94,8 +93,8 @@ public class ExpandSearchPhaseTests extends ESTestCase { for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits.get(innerHitNum), null, null, null, false, null, 1); - SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null); - mSearchResponses.add(new MultiSearchResponse.Item(response, null)); + mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); } listener.onResponse( @@ -107,20 +106,19 @@ public class ExpandSearchPhaseTests extends ESTestCase { Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))))}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, (r) -> new SearchPhase("test") { @Override - public void run() throws IOException { - reference.set(mockSearchPhaseContext.buildSearchResponse(r, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(r, null); } } ); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(reference.get()); - SearchResponse theResponse = reference.get(); + SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(theResponse); assertEquals(numInnerHits, 
theResponse.getHits().getHits()[0].getInnerHits().size()); for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { @@ -148,11 +146,12 @@ public class ExpandSearchPhaseTests extends ESTestCase { assertTrue(executedMultiSearch.compareAndSet(false, true)); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits, null, null, null, false, null, 1); - SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, 0, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); listener.onResponse(new MultiSearchResponse( new MultiSearchResponse.Item[]{ new MultiSearchResponse.Item(null, new RuntimeException("boom")), - new MultiSearchResponse.Item(response, null) + new MultiSearchResponse.Item(searchResponse, null) }, randomIntBetween(1, 10000))); } }; @@ -163,12 +162,11 @@ public class ExpandSearchPhaseTests extends ESTestCase { Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))))}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, r -> new SearchPhase("test") { @Override - public void run() throws IOException { - reference.set(mockSearchPhaseContext.buildSearchResponse(r, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(r, null); } } ); @@ -176,7 +174,7 @@ public class ExpandSearchPhaseTests extends ESTestCase { assertThat(mockSearchPhaseContext.phaseFailure.get(), Matchers.instanceOf(RuntimeException.class)); assertEquals("boom", mockSearchPhaseContext.phaseFailure.get().getMessage()); assertNotNull(mockSearchPhaseContext.phaseFailure.get()); - assertNull(reference.get()); + assertNull(mockSearchPhaseContext.searchResponse.get()); assertEquals(0, mockSearchPhaseContext.phasesExecuted.get()); } @@ -195,18 +193,17 @@ public class ExpandSearchPhaseTests extends ESTestCase { Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(null))))}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, r -> new SearchPhase("test") { @Override - public void run() throws IOException { - reference.set(mockSearchPhaseContext.buildSearchResponse(r, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(r, null); } } ); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(reference.get()); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); assertEquals(1, mockSearchPhaseContext.phasesExecuted.get()); } @@ -223,18 +220,17 @@ public class ExpandSearchPhaseTests extends ESTestCase { SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, 
internalSearchResponse, r -> new SearchPhase("test") { @Override - public void run() throws IOException { - reference.set(mockSearchPhaseContext.buildSearchResponse(r, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(r, null); } } ); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(reference.get()); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); assertEquals(1, mockSearchPhaseContext.phasesExecuted.get()); } @@ -269,18 +265,17 @@ public class ExpandSearchPhaseTests extends ESTestCase { SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, r -> new SearchPhase("test") { @Override - public void run() throws IOException { - reference.set(mockSearchPhaseContext.buildSearchResponse(r, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(r, null); } } ); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(reference.get()); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); assertEquals(1, mockSearchPhaseContext.phasesExecuted.get()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 5614476c851..886e6dc1fe5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.transport.Transport; import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; public class FetchSearchPhaseTests extends ESTestCase { @@ -52,7 +51,6 @@ public class FetchSearchPhaseTests extends ESTestCase { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 1); - AtomicReference responseRef = new AtomicReference<>(); boolean hasHits = randomBoolean(); final int numHits; if (hasHits) { @@ -73,17 +71,18 @@ public class FetchSearchPhaseTests extends ESTestCase { FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }); assertEquals("fetch", phase.getName()); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertEquals(numHits, responseRef.get().getHits().getTotalHits().value); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(numHits, searchResponse.getHits().getTotalHits().value); if (numHits != 0) { - assertEquals(42, responseRef.get().getHits().getAt(0).docId()); + assertEquals(42, searchResponse.getHits().getAt(0).docId()); } assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } @@ -94,7 +93,6 @@ public 
class FetchSearchPhaseTests extends ESTestCase { (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); - AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); @@ -111,7 +109,7 @@ public class FetchSearchPhaseTests extends ESTestCase { queryResult.setShardIndex(1); results.consumeResult(queryResult); - SearchTransportService searchTransportService = new SearchTransportService(null, null) { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { @@ -127,23 +125,23 @@ public class FetchSearchPhaseTests extends ESTestCase { listener.onResponse(fetchResult); } }; - mockSearchPhaseContext.searchTransport = searchTransportService; FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }); assertEquals("fetch", phase.getName()); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertEquals(2, responseRef.get().getHits().getTotalHits().value); - assertEquals(84, responseRef.get().getHits().getAt(0).docId()); - assertEquals(42, responseRef.get().getHits().getAt(1).docId()); - assertEquals(0, responseRef.get().getFailedShards()); - assertEquals(2, responseRef.get().getSuccessfulShards()); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(42, searchResponse.getHits().getAt(1).docId()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(2, searchResponse.getSuccessfulShards()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } @@ -153,7 +151,6 @@ public class FetchSearchPhaseTests extends ESTestCase { (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); - AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); @@ -170,7 +167,7 @@ public class FetchSearchPhaseTests extends ESTestCase { queryResult.setShardIndex(1); results.consumeResult(queryResult); - SearchTransportService searchTransportService = new SearchTransportService(null, null) { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { @@ -185,24 +182,24 @@ public class 
FetchSearchPhaseTests extends ESTestCase { } }; - mockSearchPhaseContext.searchTransport = searchTransportService; FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }); assertEquals("fetch", phase.getName()); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertEquals(2, responseRef.get().getHits().getTotalHits().value); - assertEquals(84, responseRef.get().getHits().getAt(0).docId()); - assertEquals(1, responseRef.get().getFailedShards()); - assertEquals(1, responseRef.get().getSuccessfulShards()); - assertEquals(1, responseRef.get().getShardFailures().length); - assertTrue(responseRef.get().getShardFailures()[0].getCause() instanceof MockDirectoryWrapper.FakeIOException); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(1, searchResponse.getFailedShards()); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getShardFailures().length); + assertTrue(searchResponse.getShardFailures()[0].getCause() instanceof MockDirectoryWrapper.FakeIOException); assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(123L)); } @@ -216,7 +213,6 @@ public class FetchSearchPhaseTests extends ESTestCase { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), numHits); - AtomicReference responseRef = new AtomicReference<>(); for (int i = 0; i < numHits; i++) { QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); @@ -226,7 +222,7 @@ public class FetchSearchPhaseTests extends ESTestCase { queryResult.setShardIndex(i); results.consumeResult(queryResult); } - SearchTransportService searchTransportService = new SearchTransportService(null, null) { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { @@ -238,13 +234,12 @@ public class FetchSearchPhaseTests extends ESTestCase { }).start(); } }; - mockSearchPhaseContext.searchTransport = searchTransportService; CountDownLatch latch = new CountDownLatch(1); FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); latch.countDown(); } }); @@ -252,17 +247,18 @@ public class FetchSearchPhaseTests extends ESTestCase { phase.run(); latch.await(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertEquals(numHits, 
responseRef.get().getHits().getTotalHits().value); - assertEquals(Math.min(numHits, resultSetSize), responseRef.get().getHits().getHits().length); - SearchHit[] hits = responseRef.get().getHits().getHits(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(numHits, searchResponse.getHits().getTotalHits().value); + assertEquals(Math.min(numHits, resultSetSize), searchResponse.getHits().getHits().length); + SearchHit[] hits = searchResponse.getHits().getHits(); for (int i = 0; i < hits.length; i++) { assertNotNull(hits[i]); assertEquals("index: " + i, numHits-i, hits[i].docId()); assertEquals("index: " + i, numHits-1-i, (int)hits[i].getScore()); } - assertEquals(0, responseRef.get().getFailedShards()); - assertEquals(numHits, responseRef.get().getSuccessfulShards()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(numHits, searchResponse.getSuccessfulShards()); int sizeReleasedContexts = Math.max(0, numHits - resultSetSize); // all non fetched results will be freed assertEquals(mockSearchPhaseContext.releasedSearchContexts.toString(), sizeReleasedContexts, mockSearchPhaseContext.releasedSearchContexts.size()); @@ -274,7 +270,6 @@ public class FetchSearchPhaseTests extends ESTestCase { (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); - AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); @@ -291,7 +286,7 @@ public class FetchSearchPhaseTests extends ESTestCase { queryResult.setShardIndex(1); results.consumeResult(queryResult); AtomicInteger numFetches = new AtomicInteger(0); - SearchTransportService searchTransportService = new SearchTransportService(null, null) { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { @@ -310,19 +305,18 @@ public class FetchSearchPhaseTests extends ESTestCase { listener.onResponse(fetchResult); } }; - mockSearchPhaseContext.searchTransport = searchTransportService; FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }); assertEquals("fetch", phase.getName()); phase.run(); assertNotNull(mockSearchPhaseContext.phaseFailure.get()); assertEquals(mockSearchPhaseContext.phaseFailure.get().getMessage(), "BOOM"); - assertNull(responseRef.get()); + assertNull(mockSearchPhaseContext.searchResponse.get()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } @@ -332,7 +326,6 @@ public class FetchSearchPhaseTests extends ESTestCase { (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); InitialSearchPhase.ArraySearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); - AtomicReference responseRef = new AtomicReference<>(); 
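A pattern repeated across the ExpandSearchPhaseTests and FetchSearchPhaseTests hunks: the per-test AtomicReference plumbing (such as the removed declaration just above) gives way to the phase pushing its result into the context via sendSearchResponse, with MockSearchPhaseContext recording it in the searchResponse field added further down. Stripped to its essence, the capture looks roughly like this (simplified, hypothetical types):

import java.util.concurrent.atomic.AtomicReference;

final class CapturingPhaseContext<R> {
    final AtomicReference<R> searchResponse = new AtomicReference<>();

    // The production context sends the response to the client; the mock records it,
    // so tests assert on searchResponse.get() after phase.run().
    void sendSearchResponse(R response) {
        searchResponse.set(response);
    }
}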
int resultSetSize = 1; QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); @@ -349,7 +342,7 @@ public class FetchSearchPhaseTests extends ESTestCase { queryResult.setShardIndex(1); results.consumeResult(queryResult); - SearchTransportService searchTransportService = new SearchTransportService(null, null) { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { @@ -363,23 +356,23 @@ public class FetchSearchPhaseTests extends ESTestCase { listener.onResponse(fetchResult); } }; - mockSearchPhaseContext.searchTransport = searchTransportService; FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override - public void run() throws IOException { - responseRef.set(mockSearchPhaseContext.buildSearchResponse(searchResponse, null)); + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); } }); assertEquals("fetch", phase.getName()); phase.run(); mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertEquals(2, responseRef.get().getHits().getTotalHits().value); - assertEquals(1, responseRef.get().getHits().getHits().length); - assertEquals(84, responseRef.get().getHits().getAt(0).docId()); - assertEquals(0, responseRef.get().getFailedShards()); - assertEquals(2, responseRef.get().getSuccessfulShards()); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(1, searchResponse.getHits().getHits().length); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(2, searchResponse.getSuccessfulShards()); assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(123L)); } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 2a155d2e3ad..40c3ad0afc0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -49,6 +49,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { Set releasedSearchContexts = new HashSet<>(); SearchRequest searchRequest = new SearchRequest(); AtomicInteger phasesExecuted = new AtomicInteger(); + AtomicReference searchResponse = new AtomicReference<>(); public MockSearchPhaseContext(int numShards) { this.numShards = numShards; @@ -82,9 +83,9 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { } @Override - public SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - return new SearchResponse(internalSearchResponse, scrollId, numShards, numSuccess.get(), 0, 0, - failures.toArray(new ShardSearchFailure[failures.size()]), SearchResponse.Clusters.EMPTY); + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { + searchResponse.set(new SearchResponse(internalSearchResponse, 
scrollId, numShards, numSuccess.get(), 0, 0, + failures.toArray(ShardSearchFailure.EMPTY_ARRAY), SearchResponse.Clusters.EMPTY)); } @Override @@ -130,11 +131,6 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { command.run(); } - @Override - public void onResponse(SearchResponse response) { - Assert.fail("should not be called"); - } - @Override public void onFailure(Exception e) { Assert.fail("should not be called"); diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index 60d94c269a9..c8abb4765c1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -60,7 +60,7 @@ public class MultiSearchRequestTests extends ESTestCase { LogManager.getLogger(MultiSearchRequestTests.class)); public void testSimpleAdd() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch1.json"); + MultiSearchRequest request = parseMultiSearchRequestFromFile("/org/elasticsearch/action/search/simple-msearch1.json"); assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], @@ -136,7 +136,7 @@ public class MultiSearchRequestTests extends ESTestCase { } public void testSimpleAdd2() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch2.json"); + MultiSearchRequest request = parseMultiSearchRequestFromFile("/org/elasticsearch/action/search/simple-msearch2.json"); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).types().length, equalTo(0)); @@ -152,7 +152,7 @@ public class MultiSearchRequestTests extends ESTestCase { } public void testSimpleAdd3() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch3.json"); + MultiSearchRequest request = parseMultiSearchRequestFromFile("/org/elasticsearch/action/search/simple-msearch3.json"); assertThat(request.requests().size(), equalTo(4)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -169,7 +169,7 @@ public class MultiSearchRequestTests extends ESTestCase { } public void testSimpleAdd4() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch4.json"); + MultiSearchRequest request = parseMultiSearchRequestFromFile("/org/elasticsearch/action/search/simple-msearch4.json"); assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -188,7 +188,16 @@ public class MultiSearchRequestTests extends ESTestCase { } public void testEmptyFirstLine1() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/msearch-empty-first-line1.json"); + MultiSearchRequest request = parseMultiSearchRequestFromString( + "\n" + + "\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "{}\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "{}\n" + + "{ \"query\": {\"match_all\": {}}}\n"); 
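For context on the request strings inlined above and below: an _msearch body is newline-delimited JSON in which requests alternate between a header line (index and search metadata, where an empty object {} imposes no restrictions) and a body line carrying the query, and these two tests establish that blank lines before or between pairs are skipped rather than rejected. Each four-request body here is four repetitions of a pair of the form:

{}
{ "query": {"match_all": {}}}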
assertThat(request.requests().size(), equalTo(4)); for (SearchRequest searchRequest : request.requests()) { assertThat(searchRequest.indices().length, equalTo(0)); @@ -199,7 +208,16 @@ } public void testEmptyFirstLine2() throws Exception { - MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/msearch-empty-first-line2.json"); + MultiSearchRequest request = parseMultiSearchRequestFromString( + "\n" + + "{}\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "{}\n" + + "{ \"query\": {\"match_all\": {}}}\n" + + "\n" + + "{ \"query\": {\"match_all\": {}}}\n"); assertThat(request.requests().size(), equalTo(4)); for (SearchRequest searchRequest : request.requests()) { assertThat(searchRequest.indices().length, equalTo(0)); @@ -254,11 +272,19 @@ assertEquals(3, msearchRequest.requests().size()); } - private MultiSearchRequest parseMultiSearchRequest(String sample) throws IOException { - byte[] data = StreamsUtils.copyToBytesFromClasspath(sample); - RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()) - .withContent(new BytesArray(data), XContentType.JSON).build(); + private MultiSearchRequest parseMultiSearchRequestFromString(String request) throws IOException { + return parseMultiSearchRequest(new FakeRestRequest.Builder(xContentRegistry()) + .withContent(new BytesArray(request), XContentType.JSON).build()); + } + + private MultiSearchRequest parseMultiSearchRequestFromFile(String sample) throws IOException { + byte[] data = StreamsUtils.copyToBytesFromClasspath(sample); + return parseMultiSearchRequest(new FakeRestRequest.Builder(xContentRegistry()) + .withContent(new BytesArray(data), XContentType.JSON).build()); + } + + private MultiSearchRequest parseMultiSearchRequest(RestRequest restRequest) throws IOException { MultiSearchRequest request = new MultiSearchRequest(); RestMultiSearchAction.parseMultiLineRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS, true, (searchRequest, parser) -> { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index c959e3ed45d..9f86d190a64 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -137,6 +137,7 @@ public class ReplicationOperationTests extends ESTestCase { assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint)); assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints)); + assertThat(primary.knownGlobalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.globalCheckpoint)); assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints)); } @@ -533,6 +534,11 @@ public class ReplicationOperationTests extends ESTestCase { return globalCheckpoint; } + @Override + public long computedGlobalCheckpoint() { + return globalCheckpoint; + } + @Override public long maxSeqNoOfUpdatesOrDeletes() { return maxSeqNoOfUpdatesOrDeletes; diff --git a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java
b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java index 4440c4e1b11..f6382b92343 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java @@ -151,8 +151,13 @@ public class JodaDateMathParserTests extends ESTestCase { assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); + assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, true, null); assertDateMathEquals("now-2d", "2014-11-16T14:27:32", now, false, null); + assertDateMathEquals("now-2d", "2014-11-16T14:27:32", now, true, null); assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, null); + assertDateMathEquals("now/m", "2014-11-18T14:27:59.999Z", now, true, null); + assertDateMathEquals("now/M", "2014-11-01T00:00:00", now, false, null); + assertDateMathEquals("now/M", "2014-11-30T23:59:59.999Z", now, true, null); // timezone does not affect now assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, DateTimeZone.forID("+02:00")); diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 2e30b33ab96..2fb52460896 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -139,8 +139,13 @@ public class JavaDateMathParserTests extends ESTestCase { assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); + assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, true, null); assertDateMathEquals("now-2d", "2014-11-16T14:27:32", now, false, null); + assertDateMathEquals("now-2d", "2014-11-16T14:27:32", now, true, null); assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, null); + assertDateMathEquals("now/m", "2014-11-18T14:27:59.999Z", now, true, null); + assertDateMathEquals("now/M", "2014-11-01T00:00:00", now, false, null); + assertDateMathEquals("now/M", "2014-11-30T23:59:59.999Z", now, true, null); // timezone does not affect now assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, ZoneId.of("+02:00")); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 76e519f922e..215ac768818 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -83,7 +83,6 @@ import static org.hamcrest.Matchers.not; /** * Tests various cluster operations (e.g., indexing) during disruptions. 
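The roundUp cases added to both date-math parser tests above pin down the rounding-up semantics: with roundUp, "now/m" resolves to the last millisecond of the current minute and "now/M" to the last millisecond of the current month. A hedged sketch of the same arithmetic in plain java.time (not the parsers' actual implementation):

import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;

// Round-down vs round-up for "now/m" with now = 2014-11-18T14:27:32Z.
public class DateMathRoundingSketch {
    public static void main(String[] args) {
        ZonedDateTime now = ZonedDateTime.parse("2014-11-18T14:27:32Z");
        // rounding down keeps the boundary itself: 2014-11-18T14:27:00.000
        ZonedDateTime roundedDown = now.truncatedTo(ChronoUnit.MINUTES);
        // rounding up yields the last millisecond before the next boundary: 14:27:59.999
        ZonedDateTime roundedUp = roundedDown.plusMinutes(1).minus(1, ChronoUnit.MILLIS);
        System.out.println(roundedDown + " / " + roundedUp);
    }
}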
*/ -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class ClusterDisruptionIT extends AbstractDisruptionTestCase { @@ -109,6 +108,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") + // TestLogging for https://github.com/elastic/elasticsearch/issues/41068 public void testAckedIndexing() throws Exception { final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5; diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 4fabbe74848..6bcf74ab9aa 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -34,7 +34,6 @@ import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -51,7 +50,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** * Tests for discovery during disruptions. */ -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase { diff --git a/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java new file mode 100644 index 00000000000..dac48d89e78 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.lucene.mockfile.FilterFileSystemProvider; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.FileSystem; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class DiskDisruptionIT extends AbstractDisruptionTestCase { + + private static DisruptTranslogFileSystemProvider disruptTranslogFileSystemProvider; + + @BeforeClass + public static void installDisruptTranslogFS() { + FileSystem current = PathUtils.getDefaultFileSystem(); + disruptTranslogFileSystemProvider = new DisruptTranslogFileSystemProvider(current); + PathUtilsForTesting.installMock(disruptTranslogFileSystemProvider.getFileSystem(null)); + } + + @AfterClass + public static void removeDisruptTranslogFS() { + PathUtilsForTesting.teardown(); + } + + void injectTranslogFailures() { + disruptTranslogFileSystemProvider.injectFailures.set(true); + } + + @After + void stopTranslogFailures() { + disruptTranslogFileSystemProvider.injectFailures.set(false); + } + + static class DisruptTranslogFileSystemProvider extends FilterFileSystemProvider { + + AtomicBoolean injectFailures = new AtomicBoolean(); + + DisruptTranslogFileSystemProvider(FileSystem inner) { + super("disrupttranslog://", inner); + } + + @Override + public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + if (injectFailures.get() && path.toString().endsWith(".ckp")) { + // prevents the checkpoint file from being updated + throw new IOException("fake IOException"); + } + return super.newFileChannel(path, options, attrs); + } + + } + + /** + * This test checks that all operations below the global checkpoint are properly persisted. + * It simulates a full power outage by preventing translog checkpoint files from being written and then restarting the cluster. + * This means that all un-fsynced data will be lost. + */ + public void testGlobalCheckpointIsSafe() throws Exception { + startCluster(rarely() ?
5 : 3); + + final int numberOfShards = 1 + randomInt(2); + assertAcked(prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) + )); + ensureGreen(); + + AtomicBoolean stopGlobalCheckpointFetcher = new AtomicBoolean(); + + Map shardToGcp = new ConcurrentHashMap<>(); + for (int i = 0; i < numberOfShards; i++) { + shardToGcp.put(i, SequenceNumbers.NO_OPS_PERFORMED); + } + final Thread globalCheckpointSampler = new Thread(() -> { + while (stopGlobalCheckpointFetcher.get() == false) { + try { + for (ShardStats shardStats : client().admin().indices().prepareStats("test").clear().get().getShards()) { + final int shardId = shardStats.getShardRouting().id(); + final long globalCheckpoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); + shardToGcp.compute(shardId, (i, v) -> Math.max(v, globalCheckpoint)); + } + } catch (Exception e) { + // ignore + logger.debug("failed to fetch shard stats", e); + } + } + }); + + globalCheckpointSampler.start(); + + try (BackgroundIndexer indexer = new BackgroundIndexer("test", "_doc", client(), -1, RandomizedTest.scaledRandomIntBetween(2, 5), + false, random())) { + indexer.setRequestTimeout(TimeValue.ZERO); + indexer.setIgnoreIndexingFailures(true); + indexer.setAssertNoFailuresOnStop(false); + indexer.start(-1); + + waitForDocs(randomIntBetween(1, 100), indexer); + + logger.info("injecting failures"); + injectTranslogFailures(); + logger.info("stopping indexing"); + } + + logger.info("full cluster restart"); + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + + @Override + public void onAllNodesStopped() { + logger.info("stopping failures"); + stopTranslogFailures(); + } + + }); + + stopGlobalCheckpointFetcher.set(true); + + logger.info("waiting for global checkpoint sampler"); + globalCheckpointSampler.join(); + + logger.info("waiting for green"); + ensureGreen("test"); + + for (ShardStats shardStats : client().admin().indices().prepareStats("test").clear().get().getShards()) { + final int shardId = shardStats.getShardRouting().id(); + final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + if (shardStats.getShardRouting().active()) { + assertThat(maxSeqNo, greaterThanOrEqualTo(shardToGcp.get(shardId))); + } + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 86c706d0081..8f422537b38 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.disruption.SingleNodeDisruption; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.HashSet; @@ -52,7 +51,6 @@ import static org.hamcrest.Matchers.not; /** * Tests relating to the loss of the master. 
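The sampler thread in the new DiskDisruptionIT above tracks, per shard, the highest global checkpoint it has ever observed; ConcurrentHashMap.compute makes that maximum monotonic even though polling is racy and best-effort. The same pattern in isolation, with illustrative names rather than the test's code:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// compute() applies the remapping function atomically per key, so concurrent
// samples can only ever raise the recorded maximum, never lower it.
public class MonotonicMaxPerShard {
    private final Map<Integer, Long> maxSeen = new ConcurrentHashMap<>();

    public void sample(int shardId, long globalCheckpoint) {
        maxSeen.compute(shardId, (id, prev) ->
                prev == null ? globalCheckpoint : Math.max(prev, globalCheckpoint));
    }

    public long maxFor(int shardId) {
        return maxSeen.getOrDefault(shardId, Long.MIN_VALUE);
    }
}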
*/ -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index c4655bcf7ce..23194ab5c55 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -65,7 +65,6 @@ import static org.hamcrest.Matchers.equalTo; * Tests relating to the loss of the master, but which work with the default fault detection settings which are rather lenient and will * not detect a master failure too quickly. */ -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class StableMasterDisruptionIT extends ESIntegTestCase { @@ -174,6 +173,8 @@ public class StableMasterDisruptionIT extends ESIntegTestCase { * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. */ + @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") + // TestLogging for https://github.com/elastic/elasticsearch/issues/43392 public void testStaleMasterNotHijackingMajority() throws Exception { final List nodes = internalCluster().startNodes(3, Settings.builder() .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bf915c67c51..59bbee9f1bb 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -677,7 +677,7 @@ public class InternalEngineTests extends EngineTestCase { } @Override - public long getCheckpoint() { + public long getProcessedCheckpoint() { return localCheckpoint.get(); } } @@ -891,18 +891,18 @@ public class InternalEngineTests extends EngineTestCase { } } maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getProcessedLocalCheckpoint())); engine.syncTranslog(); } try (InternalEngine engine = new InternalEngine(config)) { engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); - assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); } try (InternalEngine engine = new InternalEngine(config)) { long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); engine.recoverFromTranslog(translogHandler, upToSeqNo); - assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(upToSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); } } @@ -1172,7 +1172,8 @@ public class InternalEngineTests 
extends EngineTestCase { engine.index(indexForDoc(doc)); boolean inSync = randomBoolean(); if (inSync) { - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } engine.flush(); @@ -1190,7 +1191,7 @@ public class InternalEngineTests extends EngineTestCase { assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 4L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(5L)); assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(5L)); @@ -1614,7 +1615,7 @@ public class InternalEngineTests extends EngineTestCase { } engine.flush(); - long localCheckpoint = engine.getLocalCheckpoint(); + long localCheckpoint = engine.getProcessedLocalCheckpoint(); globalCheckpoint.set(randomLongBetween(0, localCheckpoint)); engine.syncTranslog(); final long safeCommitCheckpoint; @@ -1702,7 +1703,7 @@ public class InternalEngineTests extends EngineTestCase { } } engine.flush(); - globalCheckpoint.set(randomLongBetween(0, engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(0, engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); final long minSeqNoToRetain; try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { @@ -1714,7 +1715,7 @@ public class InternalEngineTests extends EngineTestCase { assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); Map ops = readAllOperationsInLucene(engine, mapperService) .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); - for (long seqno = 0; seqno <= engine.getLocalCheckpoint(); seqno++) { + for (long seqno = 0; seqno <= engine.getPersistedLocalCheckpoint(); seqno++) { String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; if (seqno < minSeqNoToRetain) { Translog.Operation op = ops.get(seqno); @@ -1736,14 +1737,14 @@ public class InternalEngineTests extends EngineTestCase { // If the global checkpoint equals to the local checkpoint, the next force-merge will be a noop // because all deleted documents are expunged in the previous force-merge already. We need to flush // a new segment to make merge happen so that we can verify that all _recovery_source are pruned. 
- if (globalCheckpoint.get() == engine.getLocalCheckpoint() && liveDocs.isEmpty() == false) { + if (globalCheckpoint.get() == engine.getPersistedLocalCheckpoint() && liveDocs.isEmpty() == false) { String deleteId = randomFrom(liveDocs); engine.delete(new Engine.Delete("test", deleteId, newUid(deleteId), primaryTerm.get())); liveDocsWithSource.remove(deleteId); liveDocs.remove(deleteId); engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); engine.forceMerge(true, 1, false, false, false); assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); @@ -2449,12 +2450,14 @@ public class InternalEngineTests extends EngineTestCase { } } + initialEngine.syncTranslog(); // to advance persisted local checkpoint + if (randomInt(10) < 3) { // only update rarely as we do it every doc replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo)); } gcpTracker.updateLocalCheckpoint(primary.allocationId().getId(), - initialEngine.getLocalCheckpoint()); + initialEngine.getPersistedLocalCheckpoint()); gcpTracker.updateLocalCheckpoint(replica.allocationId().getId(), replicaLocalCheckpoint); if (rarely()) { @@ -2468,7 +2471,7 @@ public class InternalEngineTests extends EngineTestCase { globalCheckpoint = gcpTracker.getGlobalCheckpoint(); assertEquals(primarySeqNo, initialEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(primarySeqNo, initialEngine.getLocalCheckpoint()); + assertEquals(primarySeqNo, initialEngine.getPersistedLocalCheckpoint()); assertThat(globalCheckpoint, equalTo(replicaLocalCheckpoint)); assertThat( @@ -2502,7 +2505,8 @@ public class InternalEngineTests extends EngineTestCase { // that the committed max seq no is equivalent to what the current primary seq no is, as all data // we have assigned sequence numbers to should be in the commit equalTo(primarySeqNo)); - assertThat(recoveringEngine.getLocalCheckpoint(), equalTo(primarySeqNo)); + assertThat(recoveringEngine.getProcessedLocalCheckpoint(), equalTo(primarySeqNo)); + assertThat(recoveringEngine.getPersistedLocalCheckpoint(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo)); assertThat(generateNewSeqNo(recoveringEngine), equalTo(primarySeqNo + 1)); } @@ -2819,7 +2823,9 @@ public class InternalEngineTests extends EngineTestCase { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance persisted local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE)); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2981,7 +2987,9 @@ public class InternalEngineTests extends EngineTestCase { final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); 
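Most of the InternalEngineTests churn above and below is a mechanical split of getLocalCheckpoint() into getProcessedLocalCheckpoint() and getPersistedLocalCheckpoint(), depending on whether an assertion cares about operations applied in memory or operations whose translog entries have been fsynced; the recurring syncTranslog() calls exist to close the gap between the two. A toy model of the invariant (illustrative names; the real LocalCheckpointTracker also handles out-of-order sequence numbers, which this sketch omits):

// Toy model: "processed" advances as operations are applied in memory,
// "persisted" only once the translog is fsynced, so persisted <= processed
// always holds and syncTranslog() brings them back together.
public class ToyCheckpoints {
    private long processed = -1L; // analogous to SequenceNumbers.NO_OPS_PERFORMED
    private long persisted = -1L;

    public synchronized void markProcessed(long seqNo) {
        processed = Math.max(processed, seqNo); // gap tracking omitted
    }

    public synchronized void syncTranslog() {
        persisted = processed; // the fsync makes everything processed durable
    }

    public synchronized long processedCheckpoint() { return processed; }
    public synchronized long persistedCheckpoint() { return persisted; }
}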
throwErrorOnCommit.set(true); FlushFailedEngineException e = expectThrows(FlushFailedEngineException.class, engine::flush); assertThat(e.getCause().getMessage(), equalTo("power's out")); @@ -3041,7 +3049,7 @@ public class InternalEngineTests extends EngineTestCase { } public void testTranslogReplay() throws IOException { - final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpoint(); + final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getProcessedLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); @@ -3135,7 +3143,7 @@ public class InternalEngineTests extends EngineTestCase { final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new Translog( new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); @@ -4177,9 +4185,10 @@ public class InternalEngineTests extends EngineTestCase { } } - assertThat(initialEngine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); + assertThat(initialEngine.getProcessedLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo((long) (docs - 1))); initialEngine.flush(true, true); + assertEquals(initialEngine.getProcessedLocalCheckpoint(), initialEngine.getPersistedLocalCheckpoint()); latchReference.get().countDown(); for (final Thread thread : threads) { @@ -4188,10 +4197,11 @@ public class InternalEngineTests extends EngineTestCase { } finally { IOUtils.close(initialEngine); } - try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { + try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); - assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); + assertEquals(recoveringEngine.getProcessedLocalCheckpoint(), recoveringEngine.getPersistedLocalCheckpoint()); + assertThat(recoveringEngine.getProcessedLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } } @@ -4272,7 +4282,7 @@ public class InternalEngineTests extends EngineTestCase { expectedLocalCheckpoint = numberOfOperations - 1; } - assertThat(engine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); @@ -4304,12 +4314,12 @@ public class InternalEngineTests extends EngineTestCase { final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = "filling gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); - 
assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); + assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); noOpEngine.noOp( new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); - assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); + assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1)); // skip to the op that we added to the translog Translog.Operation op; @@ -4528,7 +4538,7 @@ public class InternalEngineTests extends EngineTestCase { engine.flush(); } } - globalCheckpoint.set(randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); prevSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); prevDocs = getDocIds(engine, true); @@ -4565,7 +4575,9 @@ public class InternalEngineTests extends EngineTestCase { replicaEngine.index(replicaIndexForDoc(doc, 1, indexResult.getSeqNo(), false)); } } - checkpointOnReplica = replicaEngine.getLocalCheckpoint(); + engine.syncTranslog(); // to advance local checkpoint + replicaEngine.syncTranslog(); // to advance local checkpoint + checkpointOnReplica = replicaEngine.getProcessedLocalCheckpoint(); } finally { IOUtils.close(replicaEngine); } @@ -4573,17 +4585,17 @@ public class InternalEngineTests extends EngineTestCase { boolean flushed = false; AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - Engine recoveringEngine = null; + InternalEngine recoveringEngine = null; try { assertEquals(docs - 1, engine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(docs - 1, engine.getLocalCheckpoint()); + assertEquals(docs - 1, engine.getProcessedLocalCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpoint()); + assertEquals(checkpointOnReplica, replicaEngine.getProcessedLocalCheckpoint()); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(checkpointOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica, recoveringEngine.fillSeqNoGaps(2)); // now snapshot the tlog and ensure the primary term is updated @@ -4599,7 +4611,7 @@ public class InternalEngineTests extends EngineTestCase { } assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); if ((flushed = randomBoolean())) { globalCheckpoint.set(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); getTranslog(recoveringEngine).sync(); @@ -4618,10 +4630,10 @@ public class InternalEngineTests extends EngineTestCase { } 
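fillSeqNoGaps, exercised in the recovery assertions above, backfills every unassigned sequence number up to the max seq no with a no-op so the local checkpoint can advance past the holes, and returns how many gaps it filled. A toy illustration of that contract (not the engine's implementation):

import java.util.Set;
import java.util.TreeSet;

// Every missing seq# up to maxSeqNo is "filled" with a no-op; the return value
// counts the gaps, matching the assertions on fillSeqNoGaps(...) above.
public class GapFillSketch {
    static int fillGaps(Set<Long> completed, long maxSeqNo) {
        int filled = 0;
        for (long seqNo = 0; seqNo <= maxSeqNo; seqNo++) {
            if (completed.add(seqNo)) { // seq# was a gap
                filled++;
            }
        }
        return filled;
    }

    public static void main(String[] args) {
        Set<Long> completed = new TreeSet<>(Set.of(0L, 1L, 3L, 5L));
        System.out.println(fillGaps(completed, 5L)); // prints 2 (seq# 2 and 4)
    }
}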
recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); assertEquals(0, recoveringEngine.fillSeqNoGaps(3)); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); } finally { IOUtils.close(recoveringEngine); } @@ -4805,7 +4817,7 @@ public class InternalEngineTests extends EngineTestCase { // Advance the global checkpoint during the flush to create a lag between a persisted global checkpoint in the translog // (this value is visible to the deletion policy) and an in memory global checkpoint in the SequenceNumbersService. if (rarely()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getPersistedLocalCheckpoint())); } super.commitIndexWriter(writer, translog, syncId); } @@ -4817,7 +4829,7 @@ public class InternalEngineTests extends EngineTestCase { document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); engine.index(indexForDoc(testParsedDocument(Integer.toString(docId), null, document, B_1, null))); if (frequently()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); } if (frequently()) { @@ -4957,11 +4969,11 @@ public class InternalEngineTests extends EngineTestCase { engine.flush(false, randomBoolean()); List commits = DirectoryReader.listCommits(store.directory()); // Global checkpoint advanced but not enough - all commits are kept. - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint() - 1)); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint() - 1)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); // Global checkpoint advanced enough - only the last commit is kept. - globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); + globalCheckpoint.set(randomLongBetween(engine.getPersistedLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); assertThat(engine.getTranslog().totalOperations(), equalTo(0)); @@ -4986,7 +4998,7 @@ public class InternalEngineTests extends EngineTestCase { for (int i = 0; i < numSnapshots; i++) { snapshots.add(engine.acquireSafeIndexCommit()); // taking snapshots from the safe commit. 
} - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); final List commits = DirectoryReader.listCommits(store.directory()); for (int i = 0; i < numSnapshots - 1; i++) { @@ -5067,7 +5079,7 @@ public class InternalEngineTests extends EngineTestCase { engine.onSettingsChanged(); final int numOps = scaledRandomIntBetween(100, 10_000); for (int i = 0; i < numOps; i++) { - final long localCheckPoint = engine.getLocalCheckpoint(); + final long localCheckPoint = engine.getProcessedLocalCheckpoint(); final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5); final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); @@ -5250,8 +5262,8 @@ public class InternalEngineTests extends EngineTestCase { } appendOnlyIndexer.join(120_000); assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(maxSeqNoOfNonAppendOnly)); - globalCheckpoint.set(engine.getLocalCheckpoint()); engine.syncTranslog(); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.flush(); } try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { @@ -5435,7 +5447,10 @@ public class InternalEngineTests extends EngineTestCase { } existingSeqNos.add(result.getSeqNo()); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint())); + engine.syncTranslog(); // advance persisted local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + globalCheckpoint.set( + randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getPersistedCheckpoint())); } if (randomBoolean()) { retentionLeasesVersion.incrementAndGet(); @@ -5499,7 +5514,7 @@ public class InternalEngineTests extends EngineTestCase { latch.countDown(); refreshThreads[i] = new Thread(() -> { while (done.get() == false) { - long checkPointBeforeRefresh = engine.getLocalCheckpoint(); + long checkPointBeforeRefresh = engine.getProcessedLocalCheckpoint(); engine.refresh("test", randomFrom(Engine.SearcherScope.values()), true); assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh)); } @@ -5515,7 +5530,7 @@ public class InternalEngineTests extends EngineTestCase { thread.join(); } engine.refresh("test"); - assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint())); + assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getProcessedLocalCheckpoint())); } public void testLuceneSnapshotRefreshesOnlyOnce() throws Exception { @@ -5628,8 +5643,8 @@ public class InternalEngineTests extends EngineTestCase { flushedOperations.add(op); applyOperation(engine, op); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } if (randomInt(100) < 10) { engine.refresh("test"); @@ -5653,7 +5668,7 @@ public class InternalEngineTests extends EngineTestCase { try (InternalEngine engine = new InternalEngine(config)) { // do not recover from translog final Map deletesAfterCheckpoint = new HashMap<>(); for (Engine.Operation op : operationsInSafeCommit) { - if (op instanceof Engine.NoOp == false && op.seqNo() > engine.getLocalCheckpoint()) { + if (op instanceof Engine.NoOp == false && op.seqNo() 
> engine.getPersistedLocalCheckpoint()) { deletesAfterCheckpoint.put(new Term(IdFieldMapper.NAME, Uid.encodeId(op.id())).bytes(), op); } } @@ -5674,8 +5689,8 @@ final Set seqNosInSafeCommit = operationsInSafeCommit.stream().map(op -> op.seqNo()).collect(Collectors.toSet()); for (Engine.Operation op : operations) { assertThat( - "seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getCheckpoint(), - tracker.contains(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo()))); + "seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getProcessedCheckpoint(), + tracker.hasProcessed(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo()))); } engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertThat(getDocIds(engine, true), equalTo(docs)); @@ -5695,8 +5710,8 @@ config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get))) { List ops = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean()); applyOperations(engine, ops); - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); - engine.syncTranslog(); + engine.syncTranslog(); // to advance persisted checkpoint + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.flush(); docs = getDocIds(engine, true); } @@ -5955,8 +5970,8 @@ for (Engine.Operation op : operations) { applyOperation(engine, op); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); engine.syncTranslog(); + engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } if (randomInt(100) < 10) { engine.refresh("test"); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index d1840c4d97c..f6327e8132c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -161,7 +161,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase { int totalOps = 0; for (Engine.Operation op : operations) { // Engine skips deletes or indexes below the local checkpoint - if (engine.getLocalCheckpoint() < op.seqNo() || op instanceof Engine.NoOp) { + if (engine.getProcessedLocalCheckpoint() < op.seqNo() || op instanceof Engine.NoOp) { seqNoToTerm.put(op.seqNo(), op.primaryTerm()); if (op instanceof Engine.Index) { totalOps += ((Engine.Index) op).docs().size(); @@ -228,7 +228,7 @@ readyLatch.countDown(); readyLatch.await(); concurrentlyApplyOps(operations, engine); - assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(operations.size() - 1L)); + assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L)); isDone.set(true); for (Follower follower : followers) { follower.join(); @@ -237,13 +237,13 @@ } class Follower extends Thread { - private final Engine leader; + private final InternalEngine leader;
private final InternalEngine engine; private final TranslogHandler translogHandler; private final AtomicBoolean isDone; private final CountDownLatch readLatch; - Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { + Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { this.leader = leader; this.isDone = isDone; this.readLatch = readLatch; @@ -252,9 +252,9 @@ public class LuceneChangesSnapshotTests extends EngineTestCase { this.engine = createEngine(createStore(), createTempDir()); } - void pullOperations(Engine follower) throws IOException { - long leaderCheckpoint = leader.getLocalCheckpoint(); - long followerCheckpoint = follower.getLocalCheckpoint(); + void pullOperations(InternalEngine follower) throws IOException { + long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint(); + long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint(); if (followerCheckpoint < leaderCheckpoint) { long fromSeqNo = followerCheckpoint + 1; long batchSize = randomLongBetween(0, 100); @@ -271,7 +271,8 @@ public class LuceneChangesSnapshotTests extends EngineTestCase { readLatch.countDown(); readLatch.await(); while (isDone.get() == false || - engine.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) { + engine.getLocalCheckpointTracker().getProcessedCheckpoint() < + leader.getLocalCheckpointTracker().getProcessedCheckpoint()) { pullOperations(engine); } assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index de32e3e4307..6f74ac23a8e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -85,12 +85,12 @@ public class NoOpEngineTests extends EngineTestCase { flushAndTrimTranslog(engine); - long localCheckpoint = engine.getLocalCheckpoint(); + long localCheckpoint = engine.getPersistedLocalCheckpoint(); long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo(); engine.close(); final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker)); - assertThat(noOpEngine.getLocalCheckpoint(), equalTo(localCheckpoint)); + assertThat(noOpEngine.getPersistedLocalCheckpoint(), equalTo(localCheckpoint)); assertThat(noOpEngine.getSeqNoStats(100L).getMaxSeqNo(), equalTo(maxSeqNo)); try (Engine.IndexCommitRef ref = noOpEngine.acquireLastIndexCommit(false)) { try (IndexReader reader = DirectoryReader.open(ref.getIndexCommit())) { @@ -114,7 +114,8 @@ public class NoOpEngineTests extends EngineTestCase { if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } for (int i = 0; i < numDocs; i++) { @@ -122,11 +123,12 @@ public class NoOpEngineTests extends EngineTestCase { String delId = Integer.toString(i); Engine.DeleteResult result = engine.delete(new Engine.Delete("test", delId, newUid(delId), primaryTerm.get())); assertTrue(result.isFound()); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); deletions += 1; } } - 
engine.getLocalCheckpointTracker().waitForOpsToComplete(numDocs + deletions - 1); + engine.getLocalCheckpointTracker().waitForProcessedOpsToComplete(numDocs + deletions - 1); flushAndTrimTranslog(engine); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index e0ad514e6db..f01f4c5b8e3 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -62,15 +62,16 @@ public class ReadOnlyEngineTests extends EngineTestCase { if (rarely()) { engine.flush(); } - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.flush(); readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), engine.getTranslogStats(), false, Function.identity()); lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); lastDocIds = getDocIds(engine, true); - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); for (int i = 0; i < numDocs; i++) { @@ -94,7 +95,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { IOUtils.close(external, internal); // the locked down engine should still point to the previous commit - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { @@ -105,7 +106,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { try (InternalEngine recoveringEngine = new InternalEngine(config)) { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); // the locked down engine should still point to the previous commit - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); } @@ -129,9 +130,10 @@ public class ReadOnlyEngineTests extends EngineTestCase { if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); engine.flushAndClose(); readOnlyEngine = new 
ReadOnlyEngine(engine.engineConfig, null , null, true, Function.identity()); @@ -155,10 +157,10 @@ public class ReadOnlyEngineTests extends EngineTestCase { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); - maxSeqNo = engine.getLocalCheckpoint(); + maxSeqNo = engine.getProcessedLocalCheckpoint(); } - globalCheckpoint.set(engine.getLocalCheckpoint() - 1); engine.syncTranslog(); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint() - 1); engine.flushAndClose(); IllegalStateException exception = expectThrows(IllegalStateException.class, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index 6822298c222..e69cf09de09 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -135,16 +135,48 @@ public class BooleanFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject()); DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + // omit "false"/"true" here as they should still be parsed correctly + String randomValue = randomFrom("off", "no", "0", "on", "yes", "1"); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() - // omit "false"/"true" here as they should still be parsed correctly - .field("field", randomFrom("off", "no", "0", "on", "yes", "1")) + .field("field", randomValue) .endObject()); MapperParsingException ex = expectThrows(MapperParsingException.class, () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); - assertEquals("failed to parse field [field] of type [boolean] in document with id '1'", ex.getMessage()); + assertEquals("failed to parse field [field] of type [boolean] in document with id '1'. " + + "Preview of field's value: '" + randomValue + "'", ex.getMessage()); } + + public void testParsesBooleansNestedStrict() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "boolean") + .endObject() + .endObject() + .endObject() + .endObject()); + DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + // omit "false"/"true" here as they should still be parsed correctly + String randomValue = "no"; + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .field("inner_field", randomValue) + .endObject() + .endObject()); + MapperParsingException ex = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); + assertEquals("failed to parse field [field] of type [boolean] in document with id '1'. 
" + + "Preview of field's value: '{inner_field=" + randomValue + "}'", ex.getMessage()); + } + + + + public void testMultiFields() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 1bdf40bcc67..a076a60231f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -386,6 +386,85 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); } + public void testParsesKeywordNestedEmptyObjectStrict() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject()); + DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .endObject() + .endObject()); + MapperParsingException ex = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); + assertEquals("failed to parse field [field] of type [keyword] in document with id '1'. " + + "Preview of field's value: '{}'", ex.getMessage()); + } + + public void testParsesKeywordNestedListStrict() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject()); + DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startArray("field") + .startObject() + .startArray("array_name") + .value("inner_field_first") + .value("inner_field_second") + .endArray() + .endObject() + .endArray() + .endObject()); + MapperParsingException ex = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); + assertEquals("failed to parse field [field] of type [keyword] in document with id '1'. 
" + + "Preview of field's value: '{array_name=[inner_field_first, inner_field_second]}'", ex.getMessage()); + } + + public void testParsesKeywordNullStrict() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject()); + DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); + + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .nullField("field_name") + .endObject() + .endObject()); + MapperParsingException ex = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); + assertEquals("failed to parse field [field] of type [keyword] in document with id '1'. " + + "Preview of field's value: '{field_name=null}'", ex.getMessage()); + } + public void testUpdateNormalizer() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") diff --git a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 1c3d539263e..c0333cab98c 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -21,10 +21,19 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.slice.SliceBuilder; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + import static java.util.Collections.emptyMap; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; /** @@ -92,4 +101,134 @@ public class ReindexRequestTests extends AbstractBulkByScrollRequestTestCase())); + } + + public void testBuildRemoteInfoFullyLoaded() throws IOException { + Map headers = new HashMap<>(); + headers.put("first", "a"); + headers.put("second", "b"); + headers.put("third", ""); + + Map remote = new HashMap<>(); + remote.put("host", "https://example.com:9200"); + remote.put("username", "testuser"); + remote.put("password", "testpass"); + remote.put("headers", headers); + remote.put("socket_timeout", "90s"); + remote.put("connect_timeout", "10s"); + + Map query = new HashMap<>(); + query.put("a", "b"); + + Map source = new HashMap<>(); + source.put("remote", remote); + source.put("query", query); + + RemoteInfo remoteInfo = ReindexRequest.buildRemoteInfo(source); + assertEquals("https", remoteInfo.getScheme()); + assertEquals("example.com", remoteInfo.getHost()); + assertEquals(9200, remoteInfo.getPort()); + assertEquals("{\n \"a\" : \"b\"\n}", remoteInfo.getQuery().utf8ToString()); + assertEquals("testuser", remoteInfo.getUsername()); + assertEquals("testpass", remoteInfo.getPassword()); + 
assertEquals(headers, remoteInfo.getHeaders()); + assertEquals(timeValueSeconds(90), remoteInfo.getSocketTimeout()); + assertEquals(timeValueSeconds(10), remoteInfo.getConnectTimeout()); + } + + public void testBuildRemoteInfoWithoutAllParts() throws IOException { + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase(":9200")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://:9200")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com:9200")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com")); + } + + public void testBuildRemoteInfoWithAllHostParts() throws IOException { + RemoteInfo info = buildRemoteInfoHostTestCase("http://example.com:9200"); + assertEquals("http", info.getScheme()); + assertEquals("example.com", info.getHost()); + assertEquals(9200, info.getPort()); + assertNull(info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); // Didn't set the timeout so we should get the default + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); // Didn't set the timeout so we should get the default + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + assertNull(info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + info = buildRemoteInfoHostTestCase("https://[::1]:9201"); + assertEquals("https", info.getScheme()); + assertEquals("[::1]", info.getHost()); + assertEquals(9201, info.getPort()); + assertNull(info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201/"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + assertEquals("/", info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201/proxy-path/"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + assertEquals("/proxy-path/", info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> buildRemoteInfoHostTestCase("https")); + assertEquals("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? 
but was [https]",
+            exception.getMessage());
+    }
+
+    public void testReindexFromRemoteRequestParsing() throws IOException {
+        BytesReference request;
+        try (XContentBuilder b = JsonXContent.contentBuilder()) {
+            b.startObject(); {
+                b.startObject("source"); {
+                    b.startObject("remote"); {
+                        b.field("host", "http://localhost:9200");
+                    }
+                    b.endObject();
+                    b.field("index", "source");
+                }
+                b.endObject();
+                b.startObject("dest"); {
+                    b.field("index", "dest");
+                }
+                b.endObject();
+            }
+            b.endObject();
+            request = BytesReference.bytes(b);
+        }
+        try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) {
+            ReindexRequest r = ReindexRequest.fromXContent(p);
+            assertEquals("localhost", r.getRemoteInfo().getHost());
+            assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices());
+        }
+    }
+
+    private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException {
+        Map<String, Object> remote = new HashMap<>();
+        remote.put("host", hostInRest);
+
+        Map<String, Object> source = new HashMap<>();
+        source.put("remote", remote);
+
+        return ReindexRequest.buildRemoteInfo(source);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 7b1b45a01f1..c94c289f51f 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -596,10 +596,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase {
             final long expectedDocs = docs + 2L;
             assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
             // recovery has not completed, therefore the global checkpoint can have advanced on the primary
-            assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1));
+            assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 1));
             // the pending document is not done, the checkpoints can not have advanced on the replica
             assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 1));
-            assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 1));
+            assertThat(replica.getLastKnownGlobalCheckpoint(), lessThan(expectedDocs - 1));
         }
         // wait for recovery to enter the translog phase
@@ -612,9 +612,9 @@
             final long expectedDocs = docs + 3L;
             assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
             // recovery is now in the process of being completed, therefore the global checkpoint can not have advanced on the primary
-            assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 2));
+            assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 2));
             assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 2));
-            assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 2));
+            assertThat(replica.getLastKnownGlobalCheckpoint(), lessThan(expectedDocs - 2));
         }
         replicaEngineFactory.releaseLatchedIndexers();
@@ -624,10 +624,10 @@
             final long expectedDocs = docs + 3L;
             assertBusy(() -> {
                 assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
-                assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1));
+
assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 1)); assertThat(replica.getLocalCheckpoint(), equalTo(expectedDocs - 1)); // the global checkpoint advances can only advance here if a background global checkpoint sync fires - assertThat(replica.getGlobalCheckpoint(), anyOf(equalTo(expectedDocs - 1), equalTo(expectedDocs - 2))); + assertThat(replica.getLastKnownGlobalCheckpoint(), anyOf(equalTo(expectedDocs - 1), equalTo(expectedDocs - 2))); }); } } @@ -762,7 +762,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } shards.refresh("test"); List docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean()) - .stream().filter(doc -> doc.getSeqNo() <= newPrimary.getGlobalCheckpoint()).collect(Collectors.toList()); + .stream().filter(doc -> doc.getSeqNo() <= newPrimary.getLastKnownGlobalCheckpoint()).collect(Collectors.toList()); CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean done = new AtomicBoolean(); Thread thread = new Thread(() -> { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index cec3c05b284..79b9b231b48 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -100,7 +100,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase { lastSyncedGlobalCheckpoint = globalCheckpoint; } - when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint); + when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint); final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction( diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 4f4f39c6146..8d148a74ea9 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -58,6 +60,28 @@ public class GlobalCheckpointSyncIT extends ESIntegTestCase { .collect(Collectors.toList()); } + public void testGlobalCheckpointSyncWithAsyncDurability() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate( + "test", + Settings.builder() + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.number_of_replicas", 1)) + .get(); + + for (int j = 0; j < 10; j++) { + final String id = Integer.toString(j); + client().prepareIndex("test", "test", id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + } + + 
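// Editorial sketch (not part of this patch): under Durability.ASYNC the translog is fsynced on the
// index.translog.sync_interval timer, so immediately after indexing the persisted local checkpoint
// -- and therefore the global checkpoint -- may still trail max_seq_no:
SeqNoStats stats = client().admin().indices().prepareStats("test").get()
    .getIndex("test").getShards()[0].getSeqNoStats();
// stats.getGlobalCheckpoint() may still be < stats.getMaxSeqNo() at this point, which is why the
// test polls with assertBusy(...) below until the background sync lets the checkpoints converge.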
assertBusy(() -> { + SeqNoStats seqNoStats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0].getSeqNoStats(); + assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + }); + } + public void testPostOperationGlobalCheckpointSync() throws Exception { // set the sync interval high so it does not execute during this test runGlobalCheckpointSyncTest(TimeValue.timeValueHours(24), client -> {}, client -> {}); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index 44b3794ea6d..a11e29097cc 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -55,43 +55,79 @@ public class LocalCheckpointTrackerTests extends ESTestCase { tracker = createEmptyTracker(); } - public void testSimplePrimary() { + public void testSimplePrimaryProcessed() { long seqNo1, seqNo2; - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); seqNo1 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(0L)); - tracker.markSeqNoAsCompleted(seqNo1); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(0L), equalTo(true)); - assertThat(tracker.contains(atLeast(1)), equalTo(false)); + tracker.markSeqNoAsProcessed(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0L), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); seqNo1 = tracker.generateSeqNo(); seqNo2 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(1L)); assertThat(seqNo2, equalTo(2L)); - tracker.markSeqNoAsCompleted(seqNo2); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(seqNo1), equalTo(false)); - assertThat(tracker.contains(seqNo2), equalTo(true)); - tracker.markSeqNoAsCompleted(seqNo1); - assertThat(tracker.getCheckpoint(), equalTo(2L)); - assertThat(tracker.contains(between(0, 2)), equalTo(true)); - assertThat(tracker.contains(atLeast(3)), equalTo(false)); + tracker.markSeqNoAsProcessed(seqNo2); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(seqNo1), equalTo(false)); + assertThat(tracker.hasProcessed(seqNo2), equalTo(true)); + tracker.markSeqNoAsProcessed(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(2L)); + assertThat(tracker.hasProcessed(between(0, 2)), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(3)), equalTo(false)); + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); } - public void testSimpleReplica() { - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(tracker.contains(randomNonNegativeLong()), equalTo(false)); - tracker.markSeqNoAsCompleted(0L); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(0), equalTo(true)); - tracker.markSeqNoAsCompleted(2L); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(1L), equalTo(false)); - assertThat(tracker.contains(2L), equalTo(true)); - tracker.markSeqNoAsCompleted(1L); - assertThat(tracker.getCheckpoint(), equalTo(2L)); - assertThat(tracker.contains(between(0, 2)), 
equalTo(true)); - assertThat(tracker.contains(atLeast(3)), equalTo(false)); + public void testSimplePrimaryPersisted() { + long seqNo1, seqNo2; + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + seqNo1 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(0L)); + tracker.markSeqNoAsPersisted(seqNo1); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + seqNo1 = tracker.generateSeqNo(); + seqNo2 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(1L)); + assertThat(seqNo2, equalTo(2L)); + tracker.markSeqNoAsPersisted(seqNo2); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(seqNo1); + assertThat(tracker.getPersistedCheckpoint(), equalTo(2L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); + } + + public void testSimpleReplicaProcessed() { + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.hasProcessed(randomNonNegativeLong()), equalTo(false)); + tracker.markSeqNoAsProcessed(0L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0), equalTo(true)); + tracker.markSeqNoAsProcessed(2L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(1L), equalTo(false)); + assertThat(tracker.hasProcessed(2L), equalTo(true)); + tracker.markSeqNoAsProcessed(1L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(2L)); + assertThat(tracker.hasProcessed(between(0, 2)), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(3)), equalTo(false)); + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); + } + + public void testSimpleReplicaPersisted() { + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.hasProcessed(randomNonNegativeLong()), equalTo(false)); + tracker.markSeqNoAsPersisted(0L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(2L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(1L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(2L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); } public void testLazyInitialization() { @@ -100,10 +136,10 @@ public class LocalCheckpointTrackerTests extends ESTestCase { * sequence numbers this could lead to excessive memory usage resulting in out of memory errors. 
*/ long seqNo = randomNonNegativeLong(); - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); assertThat(tracker.processedSeqNo.size(), equalTo(1)); - assertThat(tracker.contains(seqNo), equalTo(true)); - assertThat(tracker.contains(randomValueOtherThan(seqNo, ESTestCase::randomNonNegativeLong)), equalTo(false)); + assertThat(tracker.hasProcessed(seqNo), equalTo(true)); + assertThat(tracker.hasProcessed(randomValueOtherThan(seqNo, ESTestCase::randomNonNegativeLong)), equalTo(false)); assertThat(tracker.processedSeqNo.size(), equalTo(1)); } @@ -117,16 +153,16 @@ public class LocalCheckpointTrackerTests extends ESTestCase { } Collections.shuffle(seqNoList, random()); for (Long seqNo : seqNoList) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); } - assertThat(tracker.checkpoint, equalTo(maxOps - 1L)); + assertThat(tracker.processedCheckpoint.get(), equalTo(maxOps - 1L)); assertThat(tracker.processedSeqNo.size(), equalTo(aligned ? 0 : 1)); if (aligned == false) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } - assertThat(tracker.contains(randomFrom(seqNoList)), equalTo(true)); + assertThat(tracker.hasProcessed(randomFrom(seqNoList)), equalTo(true)); final long notCompletedSeqNo = randomValueOtherThanMany(seqNoList::contains, ESTestCase::randomNonNegativeLong); - assertThat(tracker.contains(notCompletedSeqNo), equalTo(false)); + assertThat(tracker.hasProcessed(notCompletedSeqNo), equalTo(false)); } public void testConcurrentPrimary() throws InterruptedException { @@ -151,7 +187,7 @@ public class LocalCheckpointTrackerTests extends ESTestCase { long seqNo = tracker.generateSeqNo(); logger.info("[t{}] started [{}]", threadId, seqNo); if (seqNo != unFinishedSeq) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); logger.info("[t{}] completed [{}]", threadId, seqNo); } } @@ -163,12 +199,12 @@ public class LocalCheckpointTrackerTests extends ESTestCase { thread.join(); } assertThat(tracker.getMaxSeqNo(), equalTo(maxOps - 1L)); - assertThat(tracker.getCheckpoint(), equalTo(unFinishedSeq - 1L)); - tracker.markSeqNoAsCompleted(unFinishedSeq); - assertThat(tracker.getCheckpoint(), equalTo(maxOps - 1L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(unFinishedSeq - 1L)); + tracker.markSeqNoAsProcessed(unFinishedSeq); + assertThat(tracker.getProcessedCheckpoint(), equalTo(maxOps - 1L)); assertThat(tracker.processedSeqNo.size(), isOneOf(0, 1)); if (tracker.processedSeqNo.size() == 1) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } } @@ -202,7 +238,7 @@ public class LocalCheckpointTrackerTests extends ESTestCase { Integer[] ops = seqNoPerThread[threadId]; for (int seqNo : ops) { if (seqNo != unFinishedSeq) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); logger.info("[t{}] completed [{}]", threadId, seqNo); } } @@ -214,15 +250,15 @@ public class LocalCheckpointTrackerTests extends ESTestCase { thread.join(); } assertThat(tracker.getMaxSeqNo(), equalTo(maxOps - 1L)); - assertThat(tracker.getCheckpoint(), equalTo(unFinishedSeq - 1L)); - assertThat(tracker.contains(unFinishedSeq), equalTo(false)); - 
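// Editorial sketch (not part of this patch): the tracker now maintains two checkpoints that
// advance independently -- "processed" (the operation was applied to the engine) and "persisted"
// (the operation was fsynced in the translog). A minimal lifecycle on an empty tracker, using
// hypothetical local names:
LocalCheckpointTracker sketch = new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
long sketchSeqNo = sketch.generateSeqNo();                // 0
sketch.markSeqNoAsProcessed(sketchSeqNo);
assertThat(sketch.getProcessedCheckpoint(), equalTo(0L)); // processed checkpoint advanced...
assertThat(sketch.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); // ...persisted did not
sketch.markSeqNoAsPersisted(sketchSeqNo);                 // e.g. after a translog fsync
assertThat(sketch.getPersistedCheckpoint(), equalTo(0L));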
tracker.markSeqNoAsCompleted(unFinishedSeq); - assertThat(tracker.getCheckpoint(), equalTo(maxOps - 1L)); - assertThat(tracker.contains(unFinishedSeq), equalTo(true)); - assertThat(tracker.contains(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(unFinishedSeq - 1L)); + assertThat(tracker.hasProcessed(unFinishedSeq), equalTo(false)); + tracker.markSeqNoAsProcessed(unFinishedSeq); + assertThat(tracker.getProcessedCheckpoint(), equalTo(maxOps - 1L)); + assertThat(tracker.hasProcessed(unFinishedSeq), equalTo(true)); + assertThat(tracker.hasProcessed(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false)); assertThat(tracker.processedSeqNo.size(), isOneOf(0, 1)); if (tracker.processedSeqNo.size() == 1) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } } @@ -234,7 +270,7 @@ public class LocalCheckpointTrackerTests extends ESTestCase { try { // sychronize starting with the test thread barrier.await(); - tracker.waitForOpsToComplete(seqNo); + tracker.waitForProcessedOpsToComplete(seqNo); complete.set(true); // synchronize with the test thread checking if we are no longer waiting barrier.await(); @@ -251,11 +287,11 @@ public class LocalCheckpointTrackerTests extends ESTestCase { final List elements = IntStream.rangeClosed(0, seqNo).boxed().collect(Collectors.toList()); Randomness.shuffle(elements); for (int i = 0; i < elements.size() - 1; i++) { - tracker.markSeqNoAsCompleted(elements.get(i)); + tracker.markSeqNoAsProcessed(elements.get(i)); assertFalse(complete.get()); } - tracker.markSeqNoAsCompleted(elements.get(elements.size() - 1)); + tracker.markSeqNoAsProcessed(elements.get(elements.size() - 1)); // synchronize with the waiting thread to mark that it is complete barrier.await(); assertTrue(complete.get()); @@ -268,17 +304,17 @@ public class LocalCheckpointTrackerTests extends ESTestCase { final long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo); final LocalCheckpointTracker tracker = new LocalCheckpointTracker(maxSeqNo, localCheckpoint); if (localCheckpoint >= 0) { - assertThat(tracker.contains(randomLongBetween(0, localCheckpoint)), equalTo(true)); + assertThat(tracker.hasProcessed(randomLongBetween(0, localCheckpoint)), equalTo(true)); } - assertThat(tracker.contains(randomLongBetween(localCheckpoint + 1, Long.MAX_VALUE)), equalTo(false)); + assertThat(tracker.hasProcessed(randomLongBetween(localCheckpoint + 1, Long.MAX_VALUE)), equalTo(false)); final int numOps = between(1, 100); final List seqNos = new ArrayList<>(); for (int i = 0; i < numOps; i++) { long seqNo = randomLongBetween(0, 1000); seqNos.add(seqNo); - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); } final long seqNo = randomNonNegativeLong(); - assertThat(tracker.contains(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); + assertThat(tracker.hasProcessed(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 5febd735f8f..9bef8974563 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -419,7 
+419,7 @@ public class IndexShardTests extends IndexShardTestCase { } indexShard.acquireReplicaOperationPermit( indexShard.getPendingPrimaryTerm(), - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override @@ -717,7 +717,7 @@ public class IndexShardTests extends IndexShardTestCase { if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, - indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { + indexShard.getLastKnownGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { fail(); @@ -843,7 +843,7 @@ public class IndexShardTests extends IndexShardTestCase { private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), + indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), fut, ThreadPool.Names.WRITE, ""); return fut.get(); } @@ -922,18 +922,19 @@ public class IndexShardTests extends IndexShardTestCase { final long newPrimaryTerm = primaryTerm + 1 + randomInt(20); if (engineClosed == false) { assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } final long newGlobalCheckPoint; if (engineClosed || randomBoolean()) { newGlobalCheckPoint = SequenceNumbers.NO_OPS_PERFORMED; } else { - long localCheckPoint = indexShard.getGlobalCheckpoint() + randomInt(100); + long localCheckPoint = indexShard.getLastKnownGlobalCheckpoint() + randomInt(100); // advance local checkpoint for (int i = 0; i <= localCheckPoint; i++) { indexShard.markSeqNoAsNoop(i, "dummy doc"); } - newGlobalCheckPoint = randomIntBetween((int) indexShard.getGlobalCheckpoint(), (int) localCheckPoint); + indexShard.sync(); // advance local checkpoint + newGlobalCheckPoint = randomIntBetween((int) indexShard.getLastKnownGlobalCheckpoint(), (int) localCheckPoint); } final long expectedLocalCheckpoint; if (newGlobalCheckPoint == UNASSIGNED_SEQ_NO) { @@ -954,7 +955,7 @@ public class IndexShardTests extends IndexShardTestCase { assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm)); assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); onResponse.set(true); releasable.close(); finish(); @@ -1023,7 +1024,7 @@ public class IndexShardTests extends IndexShardTestCase { // and one after replaying translog (upto the global checkpoint); otherwise we roll translog once. 
either(equalTo(translogGen + 1)).or(equalTo(translogGen + 2))); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); } } thread.join(); @@ -1051,7 +1052,7 @@ public class IndexShardTests extends IndexShardTestCase { }; final long oldPrimaryTerm = indexShard.getPendingPrimaryTerm() - 1; - randomReplicaOperationPermitAcquisition(indexShard, oldPrimaryTerm, indexShard.getGlobalCheckpoint(), + randomReplicaOperationPermitAcquisition(indexShard, oldPrimaryTerm, indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), onLockAcquired, ""); latch.await(); assertFalse(onResponse.get()); @@ -1072,7 +1073,7 @@ public class IndexShardTests extends IndexShardTestCase { long newMaxSeqNoOfUpdates = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); PlainActionFuture fut = new PlainActionFuture<>(); - randomReplicaOperationPermitAcquisition(replica, replica.getOperationPrimaryTerm(), replica.getGlobalCheckpoint(), + randomReplicaOperationPermitAcquisition(replica, replica.getOperationPrimaryTerm(), replica.getLastKnownGlobalCheckpoint(), newMaxSeqNoOfUpdates, fut, ""); try (Releasable ignored = fut.actionGet()) { assertThat(replica.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(currentMaxSeqNoOfUpdates, newMaxSeqNoOfUpdates))); @@ -1116,19 +1117,22 @@ public class IndexShardTests extends IndexShardTestCase { final String replicaAllocationId = replicaShard.routingEntry().allocationId().getId(); primaryShard.updateLocalCheckpointForShard(replicaAllocationId, replicaLocalCheckpoint); - // initialize the local knowledge on the primary of the global checkpoint on the replica shard - final int replicaGlobalCheckpoint = - randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(primaryShard.getGlobalCheckpoint())); + // initialize the local knowledge on the primary of the persisted global checkpoint on the replica shard + final int replicaGlobalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), + Math.toIntExact(primaryShard.getLastKnownGlobalCheckpoint())); primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, replicaGlobalCheckpoint); + // initialize the local knowledge on the primary of the persisted global checkpoint on the primary + primaryShard.updateGlobalCheckpointForShard(shardRouting.allocationId().getId(), primaryShard.getLastKnownGlobalCheckpoint()); + // simulate a background maybe sync; it should only run if the knowledge on the replica of the global checkpoint lags the primary primaryShard.maybeSyncGlobalCheckpoint("test"); assertThat( synced.get(), - equalTo(maxSeqNo == primaryShard.getGlobalCheckpoint() && (replicaGlobalCheckpoint < checkpoint))); + equalTo(maxSeqNo == primaryShard.getLastKnownGlobalCheckpoint() && (replicaGlobalCheckpoint < checkpoint))); // simulate that the background sync advanced the global checkpoint on the replica - primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, primaryShard.getGlobalCheckpoint()); + primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, primaryShard.getLastKnownGlobalCheckpoint()); // reset our boolean so that we can assert after another simulated maybe sync synced.set(false); @@ -1289,7 +1293,7 @@ public class IndexShardTests extends IndexShardTestCase { } indexShard.acquireReplicaOperationPermit( primaryTerm + increment, - 
indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), new ActionListener() { @Override @@ -1990,6 +1994,7 @@ public class IndexShardTests extends IndexShardTestCase { new SourceToParse(shard.shardId().getIndexName(), "_doc", "id-2", new BytesArray("{}"), XContentType.JSON)); shard.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(shard.shardId().getIndexName(), "_doc", "id-5", new BytesArray("{}"), XContentType.JSON)); + shard.sync(); // advance local checkpoint final int translogOps; if (randomBoolean()) { @@ -2921,6 +2926,8 @@ public class IndexShardTests extends IndexShardTestCase { // Need to update and sync the global checkpoint as the soft-deletes retention MergePolicy depends on it. if (indexShard.indexSettings.isSoftDeleteEnabled()) { if (indexShard.routingEntry().primary()) { + indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), + indexShard.getLocalCheckpoint()); indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint()); } else { @@ -3306,6 +3313,7 @@ public class IndexShardTests extends IndexShardTestCase { indexShard.flush(new FlushRequest()); } } + indexShard.sync(); // advance local checkpoint assert localCheckpoint == indexShard.getLocalCheckpoint(); assert !gap || (localCheckpoint != max); return new Result(localCheckpoint, max); @@ -3753,7 +3761,7 @@ public class IndexShardTests extends IndexShardTestCase { IndexShard shard = newStartedShard(false); indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); long maxSeqNoBeforeRollback = shard.seqNoStats().getMaxSeqNo(); - final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + final long globalCheckpoint = randomLongBetween(shard.getLastKnownGlobalCheckpoint(), shard.getLocalCheckpoint()); shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); Set docBelowGlobalCheckpoint = getShardDocUIDs(shard).stream() .filter(id -> Long.parseLong(id) <= globalCheckpoint).collect(Collectors.toSet()); @@ -3837,7 +3845,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShardThread.start(); final CountDownLatch engineResetLatch = new CountDownLatch(1); - shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getGlobalCheckpoint(), 0L, + shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getLastKnownGlobalCheckpoint(), 0L, ActionListener.wrap(r -> { try (Releasable dummy = r) { shard.resetEngineToGlobalCheckpoint(); @@ -3877,7 +3885,7 @@ public class IndexShardTests extends IndexShardTestCase { }); indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); - final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + final long globalCheckpoint = randomLongBetween(shard.getLastKnownGlobalCheckpoint(), shard.getLocalCheckpoint()); shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); Thread snapshotThread = new Thread(() -> { @@ -3900,7 +3908,7 @@ public class IndexShardTests extends IndexShardTestCase { snapshotThread.start(); final CountDownLatch engineResetLatch = new CountDownLatch(1); - shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getGlobalCheckpoint(), 0L, + shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), 
shard.getLastKnownGlobalCheckpoint(), 0L, ActionListener.wrap(r -> { try (Releasable dummy = r) { shard.resetEngineToGlobalCheckpoint(); @@ -3924,7 +3932,7 @@ public class IndexShardTests extends IndexShardTestCase { for (int i = 0; i < nbTermUpdates; i++) { long opPrimaryTerm = replica.getOperationPrimaryTerm() + 1; - final long globalCheckpoint = replica.getGlobalCheckpoint(); + final long globalCheckpoint = replica.getLastKnownGlobalCheckpoint(); final long maxSeqNoOfUpdatesOrDeletes = replica.getMaxSeqNoOfUpdatesOrDeletes(); final int operations = scaledRandomIntBetween(5, 32); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index e0825445bb8..481aaa233ca 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -101,7 +101,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); - assertEquals(globalCheckPoint, shard.getGlobalCheckpoint()); + assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint()); logger.info("Total ops: {}, global checkpoint: {}", numDocs, globalCheckPoint); @@ -197,7 +197,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { public void testDoNotSendOperationsWithoutSequenceNumber() throws Exception { IndexShard shard = spy(newStartedShard(true)); - when(shard.getGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); + when(shard.getLastKnownGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); int numOps = between(0, 20); List operations = new ArrayList<>(); for (int i = 0; i < numOps; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index cd0c90f5077..3ca29b6b375 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -160,7 +160,9 @@ public class CorruptedFileIT extends ESIntegTestCase { } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); + // double flush to create safe commit in case of async durability + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); @@ -264,7 +266,9 @@ public class CorruptedFileIT extends ESIntegTestCase { } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); + // double flush to create safe commit in case of async durability + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); // we have to 
flush at least once here since we don't corrupt the translog SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index c8d4dbd43df..da339ff5c8e 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ public class TranslogDeletionPolicyTests extends ESTestCase { } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder()); + () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder(), seqNo -> {}); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index f2401505cba..c99fee9dcb8 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -113,6 +113,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -147,6 +148,7 @@ public class TranslogTests extends ESTestCase { protected Path translogDir; // A default primary term is used by translog instances created in this test. 
 private final AtomicLong primaryTerm = new AtomicLong();
+    private final AtomicReference<LongConsumer> persistedSeqNoConsumer = new AtomicReference<>();
 @Override
 protected void afterIfSuccessful() throws Exception {
@@ -165,16 +167,25 @@
     }
+    private LongConsumer getPersistedSeqNoConsumer() {
+        return seqNo -> {
+            final LongConsumer consumer = persistedSeqNoConsumer.get();
+            if (consumer != null) {
+                consumer.accept(seqNo);
+            }
+        };
+    }
+
     protected Translog createTranslog(TranslogConfig config) throws IOException {
         String translogUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, getPersistedSeqNoConsumer());
     }
     protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException {
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, getPersistedSeqNoConsumer());
     }
@@ -226,7 +237,8 @@
         final TranslogConfig translogConfig = getTranslogConfig(path);
         final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
         final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
-        return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get);
+        return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get,
+            getPersistedSeqNoConsumer());
     }
     private TranslogConfig getTranslogConfig(final Path path) {
@@ -982,7 +994,7 @@
                     throw new AssertionError("unsupported operation type [" + type + "]");
                 }
                 Translog.Location location = translog.add(op);
-                tracker.markSeqNoAsCompleted(id);
+                tracker.markSeqNoAsProcessed(id);
                 Translog.Location existing = writtenOps.put(op, location);
                 if (existing != null) {
                     fail("duplicate op [" + op + "], old entry at " + location);
                 }
@@ -994,7 +1006,7 @@
             synchronized (flushMutex) {
                 // we need not do this concurrently as we need to make sure that the generation
                 // we're committing - is still present when we're committing
-                long localCheckpoint = tracker.getCheckpoint();
+                long localCheckpoint = tracker.getProcessedCheckpoint();
                 translog.rollGeneration();
                 // expose the new checkpoint (simulating a commit), before we trim the translog
                 lastCommittedLocalCheckpoint.set(localCheckpoint);
@@ -1279,6 +1291,8 @@
     public void testTranslogWriter() throws IOException {
         final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1);
+        final Set<Long> persistedSeqNos = new HashSet<>();
+        persistedSeqNoConsumer.set(persistedSeqNos::add);
         final int numOps = randomIntBetween(8, 128);
         byte[] bytes = new byte[4];
         ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
@@ -1297,7 +1311,10 @@
             }
             writer.add(new BytesArray(bytes), seqNo);
         }
+        assertThat(persistedSeqNos, empty());
         writer.sync();
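// Editorial sketch (not part of this patch): sync() fsyncs the writer and only then reports the
// seq-nos it made durable to the LongConsumer handed to the Translog constructor. In the engine
// that consumer would plausibly be the checkpoint tracker's persisted-marker -- an assumption
// here, with the surrounding variables taken as in scope:
//
//     new Translog(config, translogUUID, deletionPolicy, globalCheckpointSupplier,
//         primaryTermSupplier, localCheckpointTracker::markSeqNoAsPersisted);
//
// whereas tests that do not assert on persistence simply pass the no-op `seqNo -> {}`.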
persistedSeqNos.remove(SequenceNumbers.UNASSIGNED_SEQ_NO); + assertEquals(seenSeqNos, persistedSeqNos); final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); @@ -1401,7 +1418,7 @@ public class TranslogTests extends ESTestCase { } } else { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1443,7 +1460,7 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1459,7 +1476,7 @@ public class TranslogTests extends ESTestCase { } if (randomBoolean()) { // recover twice try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1508,7 +1525,7 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1525,7 +1542,7 @@ public class TranslogTests extends ESTestCase { if (randomBoolean()) { // recover twice try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1573,7 +1590,7 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { 
fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3025, " + @@ -1584,7 +1601,7 @@ public class TranslogTests extends ESTestCase { Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1853,12 +1870,14 @@ public class TranslogTests extends ESTestCase { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } - this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); @@ -2052,7 +2071,7 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -2191,7 +2210,7 @@ public class TranslogTests extends ESTestCase { writtenOperations.removeIf(next -> checkpoint.offset < (next.location.translogLocation + next.location.size)); try (Translog tlog = new Translog(config, translogUUID, createTranslogDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -2241,7 +2260,7 @@ public class TranslogTests extends ESTestCase { deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); 
assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { @@ -2300,7 +2319,7 @@ public class TranslogTests extends ESTestCase { deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -2382,7 +2401,8 @@ public class TranslogTests extends ESTestCase { translogUUID = Translog.createEmptyTranslog( config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory, primaryTerm.get()); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { + return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -2496,9 +2516,10 @@ public class TranslogTests extends ESTestCase { translog.close(); try { new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) { @Override - protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) + protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint, + LongConsumer persistedSequenceNumberConsumer) throws IOException { throw new MockDirectoryWrapper.FakeIOException(); } @@ -2559,7 +2580,7 @@ public class TranslogTests extends ESTestCase { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(), - translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)); + translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); } @@ -2579,7 +2600,7 @@ public class TranslogTests extends ESTestCase { // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 1; i++) { @@ -2593,7 +2614,8 @@ public class TranslogTests extends ESTestCase { } TranslogException ex = expectThrows(TranslogException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)); + () 
-> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {})); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); } @@ -2706,7 +2728,7 @@ public class TranslogTests extends ESTestCase { SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } try (Translog translog = new Translog(config, generationUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshotFromGen( new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); @@ -2773,14 +2795,16 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); translog.add(new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); translog.rollGeneration(); Closeable lock = translog.acquireRetentionLock(); translog.add(new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3})); translog.close(); IOUtils.close(lock); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); } public static Translog.Location randomTranslogLocation() { @@ -3101,7 +3125,7 @@ public class TranslogTests extends ESTestCase { class MisbehavingTranslog extends Translog { MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { - super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, seqNo -> {}); } void callCloseDirectly() throws IOException { @@ -3223,7 +3247,7 @@ public class TranslogTests extends ESTestCase { assertFalse(brokenTranslog.isOpen()); try (Translog recoveredTranslog = new Translog(getTranslogConfig(path), brokenTranslog.getTranslogUUID(), - brokenTranslog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + brokenTranslog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { recoveredTranslog.rollGeneration(); assertFilePresences(recoveredTranslog); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 519da8c66b6..3710988772a 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; +import 
org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; @@ -274,9 +275,10 @@ public class FlushIT extends ESIntegTestCase { private void indexDoc(Engine engine, String id) throws IOException { final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc, - engine.getLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, randomLong(), -1L, false, - SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), + -1L, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); assertThat(indexResult.getFailure(), nullValue()); + engine.syncTranslog(); } public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 1dc2ba058b7..c3f6a3aae89 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -143,6 +143,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { // index #2 orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.sync(); // advance local checkpoint orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, @@ -207,6 +208,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { // index #2 orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.sync(); // advance local checkpoint orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. 
orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, @@ -330,11 +332,11 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { @Override public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener<Void> listener) { super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); - assertThat(replicaShard.getGlobalCheckpoint(), equalTo(primaryShard.getGlobalCheckpoint())); + assertThat(replicaShard.getLastKnownGlobalCheckpoint(), equalTo(primaryShard.getLastKnownGlobalCheckpoint())); } @Override public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { - assertThat(globalCheckpoint, equalTo(primaryShard.getGlobalCheckpoint())); + assertThat(globalCheckpoint, equalTo(primaryShard.getLastKnownGlobalCheckpoint())); super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); } }, true, true); diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index a005ebe673d..20f937fc6ef 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -74,10 +74,9 @@ public class CloseIndexIT extends ESIntegTestCase { @Override public Settings indexSettings() { - Settings.builder().put(super.indexSettings()) + return Settings.builder().put(super.indexSettings()) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - return super.indexSettings(); + new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)).build(); } public void testCloseMissingIndex() { diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 59e7c21a3e6..95c9b1adf6a 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -1204,7 +1204,7 @@ public class IndexStatsIT extends ESIntegTestCase { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { indexShard.sync(); - assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getGlobalCheckpoint())); + assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getLastKnownGlobalCheckpoint())); } } } diff --git a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 973c687ebe8..3000d7262db 100644 --- a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -108,6 +108,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { ensureGreen(); // ensure we have flushed segments and make them a big one via optimize client().admin().indices().prepareFlush().setForce(true).get(); + client().admin().indices().prepareFlush().setForce(true).get(); // double flush to create safe commit in case of async durability client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get(); final CountDownLatch latch = new CountDownLatch(1); @@ -119,7 +120,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
(connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; - logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); + logger.info("file chunk [{}] lastChunk: {}", req, req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); throw new RuntimeException("Caused some truncated files for fun and profit"); diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index 5bb4d4f065e..1dc7a6263d3 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -75,7 +75,6 @@ import static java.util.Collections.emptySet; public class FsRepositoryTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42905") public void testSnapshotAndRestore() throws IOException, InterruptedException { ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); try (Directory directory = newDirectory()) { @@ -149,7 +148,7 @@ public class FsRepositoryTests extends ESTestCase { secondState.getIndex().fileDetails().stream().filter(f -> f.reused() == false).collect(Collectors.toList()); Collections.sort(recoveredFiles, Comparator.comparing(RecoveryState.File::name)); assertTrue(recoveredFiles.get(0).name(), recoveredFiles.get(0).name().endsWith(".liv")); - assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_2")); + assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_" + incIndexCommit.getGeneration())); } finally { terminate(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 8acf78d301a..01040be7e90 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -825,22 +825,6 @@ public class TermsAggregatorTests extends AggregatorTestCase { public void testUnmapped() throws Exception { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { - Document document = new Document(); - document.add(new SortedDocValuesField("string", new BytesRef("a"))); - document.add(new NumericDocValuesField("long", 0L)); - document.add(new NumericDocValuesField("double", Double.doubleToRawLongBits(0L))); - indexWriter.addDocument(document); - MappedFieldType fieldType1 = new KeywordFieldMapper.KeywordFieldType(); - fieldType1.setName("another_string"); - fieldType1.setHasDocValues(true); - - MappedFieldType fieldType2 = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); - fieldType2.setName("another_long"); - fieldType2.setHasDocValues(true); - - MappedFieldType fieldType3 = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); - fieldType3.setName("another_double"); - fieldType3.setHasDocValues(true); try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { IndexSearcher indexSearcher = 
newIndexSearcher(indexReader); ValueType[] valueTypes = new ValueType[]{ValueType.STRING, ValueType.LONG, ValueType.DOUBLE}; @@ -848,13 +832,52 @@ public class TermsAggregatorTests extends AggregatorTestCase { for (int i = 0; i < fieldNames.length; i++) { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", valueTypes[i]) .field(fieldNames[i]); - Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType1, fieldType2, fieldType3); + Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, (MappedFieldType) null); aggregator.preCollection(); indexSearcher.search(new MatchAllDocsQuery(), aggregator); aggregator.postCollection(); Terms result = (Terms) aggregator.buildAggregation(0L); assertEquals("_name", result.getName()); assertEquals(0, result.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue((InternalTerms)result)); + } + } + } + } + } + + public void testUnmappedWithMissing() throws Exception { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + + Document document = new Document(); + document.add(new NumericDocValuesField("unrelated_value", 100)); + indexWriter.addDocument(document); + + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { + + MappedFieldType fieldType1 = new KeywordFieldMapper.KeywordFieldType(); + fieldType1.setName("unrelated_value"); + fieldType1.setHasDocValues(true); + + IndexSearcher indexSearcher = newIndexSearcher(indexReader); + ValueType[] valueTypes = new ValueType[]{ValueType.STRING, ValueType.LONG, ValueType.DOUBLE}; + String[] fieldNames = new String[]{"string", "long", "double"}; + Object[] missingValues = new Object[]{"abc", 19L, 19.2}; + + + for (int i = 0; i < fieldNames.length; i++) { + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", valueTypes[i]) + .field(fieldNames[i]).missing(missingValues[i]); + Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType1); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = (Terms) aggregator.buildAggregation(0L); + assertEquals("_name", result.getName()); + assertEquals(1, result.getBuckets().size()); + assertEquals(missingValues[i], result.getBuckets().get(0).getKey()); + assertEquals(1, result.getBuckets().get(0).getDocCount()); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java index d9621786612..b0e22283dbf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -124,9 +124,41 @@ public class MaxAggregatorTests extends AggregatorTestCase { }); } + public void testUnmappedField() throws IOException { + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number"); + testCase(aggregationBuilder, new DocValuesFieldExistsQuery("number"), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 1))); + }, max -> { + assertEquals(max.getValue(), Double.NEGATIVE_INFINITY, 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); + }, null); + } + + public 
void testUnmappedWithMissingField() throws IOException { + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number").missing(19L); + + testCase(aggregationBuilder, new DocValuesFieldExistsQuery("number"), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 1))); + }, max -> { + assertEquals(max.getValue(), 19.0, 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, null); + } + private void testCase(Query query, - CheckedConsumer<RandomIndexWriter, IOException> buildIndex, - Consumer<InternalMax> verify) throws IOException { + CheckedConsumer<RandomIndexWriter, IOException> buildIndex, + Consumer<InternalMax> verify) throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("number"); + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number"); + testCase(aggregationBuilder, query, buildIndex, verify, fieldType); + } + + private void testCase(MaxAggregationBuilder aggregationBuilder, Query query, + CheckedConsumer<RandomIndexWriter, IOException> buildIndex, + Consumer<InternalMax> verify, MappedFieldType fieldType) throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); @@ -135,10 +167,6 @@ public class MaxAggregatorTests extends AggregatorTestCase { IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number"); - MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); - fieldType.setName("number"); - MaxAggregator aggregator = createAggregator(query, aggregationBuilder, indexSearcher, createIndexSettings(), fieldType); aggregator.preCollection(); indexSearcher.search(query, aggregator); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 26ab74bbc53..ec0106de5ef 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3606,6 +3606,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas for (int i = 10; i < 15; i++) { index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i); } + client().admin().indices().prepareFlush(indexName).setForce(true).setWaitIfOngoing(true).get(); stats = client().admin().indices().prepareStats(indexName).clear().get(); shardStats = stats.getShards()[0]; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index e066f3dc943..94faee93ea3 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -862,13 +862,14 @@ public class SnapshotResiliencyTests extends ESTestCase { } public void clearNetworkDisruptions() { - disruptedLinks.disconnected.forEach(nodeName -> { + final Set<String> disconnectedNodes = new HashSet<>(disruptedLinks.disconnected); + disruptedLinks.clear(); + disconnectedNodes.forEach(nodeName -> { if (testClusterNodes.nodes.containsKey(nodeName)) { final DiscoveryNode node = 
testClusterNodes.nodes.get(nodeName).node; testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().openConnection(node, null)); } }); - disruptedLinks.clear(); } private NetworkDisruption.DisruptedLinks getDisruption() { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 200df75cc6c..41695872f03 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -882,6 +883,81 @@ public class RemoteClusterServiceTests extends ESTestCase { } } + public void testReconnectWhenSeedsNodesAreUpdated() throws Exception { + List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster_node_0 = startTransport("cluster_node_0", knownNodes, Version.CURRENT); + MockTransportService cluster_node_1 = startTransport("cluster_node_1", knownNodes, Version.CURRENT)) { + + final DiscoveryNode node0 = cluster_node_0.getLocalDiscoNode(); + final DiscoveryNode node1 = cluster_node_1.getLocalDiscoNode(); + knownNodes.add(node0); + knownNodes.add(node1); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = + MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + + final Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_test.seeds", Collections.singletonList(node0.getAddress().toString())); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertTrue(service.isCrossClusterSearchEnabled()); + + final RemoteClusterConnection firstRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + + final CountDownLatch firstLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_test", + Collections.singletonList(node0.getAddress().toString()), null, + genericProfile("cluster_test"), connectionListener(firstLatch)); + firstLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + assertSame(firstRemoteClusterConnection, service.getRemoteClusterConnection("cluster_test")); + + final List<String> newSeeds = new ArrayList<>(); + newSeeds.add(node1.getAddress().toString()); + if (randomBoolean()) { + newSeeds.add(node0.getAddress().toString()); + Collections.shuffle(newSeeds, random()); + } + + final CountDownLatch secondLatch = new CountDownLatch(1); + service.updateRemoteCluster( + 
"cluster_test", + newSeeds, null, + genericProfile("cluster_test"), connectionListener(secondLatch)); + secondLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertBusy(() -> { + assertFalse(firstRemoteClusterConnection.isNodeConnected(node0)); + assertFalse(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(0, firstRemoteClusterConnection.getNumNodesConnected()); + assertTrue(firstRemoteClusterConnection.isClosed()); + }); + + final RemoteClusterConnection secondRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, secondRemoteClusterConnection.getNumNodesConnected()); + assertFalse(secondRemoteClusterConnection.isClosed()); + } + } + } + } + public void testRemoteClusterWithProxy() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService cluster_1_node0 = startTransport("cluster_1_node0", knownNodes, Version.CURRENT); diff --git a/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line1.json b/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line1.json deleted file mode 100644 index b417d3adc9b..00000000000 --- a/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line1.json +++ /dev/null @@ -1,9 +0,0 @@ - - -{ "query": {"match_all": {}}} -{} -{ "query": {"match_all": {}}} - -{ "query": {"match_all": {}}} -{} -{ "query": {"match_all": {}}} diff --git a/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line2.json b/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line2.json deleted file mode 100644 index c1c29b41701..00000000000 --- a/server/src/test/resources/org/elasticsearch/action/search/msearch-empty-first-line2.json +++ /dev/null @@ -1,9 +0,0 @@ - -{} -{ "query": {"match_all": {}}} - -{ "query": {"match_all": {}}} -{} -{ "query": {"match_all": {}}} - -{ "query": {"match_all": {}}} diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 564b768c47d..968739d3319 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -411,7 +411,7 @@ public abstract class EngineTestCase extends ESTestCase { String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTermSupplier.getAsLong()); return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, seqNo -> {}); } protected TranslogHandler createTranslogHandler(IndexSettings indexSettings) { @@ -835,7 +835,7 @@ public abstract class EngineTestCase extends ESTestCase { final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values()); final boolean isNestedDoc = includeNestedDocs && opType == Engine.Operation.TYPE.INDEX && randomBoolean(); final int nestedValues = between(0, 3); - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.relativeTimeInNanos(); final int copies = allowDuplicate && rarely() ? 
between(2, 4) : 1; for (int copy = 0; copy < copies; copy++) { final ParsedDocument doc = isNestedDoc ? nestedParsedDocFactory.apply(id, nestedValues) : createParsedDoc(id, null); @@ -1147,7 +1147,7 @@ public abstract class EngineTestCase extends ESTestCase { * @throws InterruptedException if the thread was interrupted while blocking on the condition */ public static void waitForOpsToComplete(InternalEngine engine, long seqNo) throws InterruptedException { - engine.getLocalCheckpointTracker().waitForOpsToComplete(seqNo); + engine.getLocalCheckpointTracker().waitForProcessedOpsToComplete(seqNo); } public static boolean hasSnapshottedCommits(Engine engine) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 2b597a64c37..8e8b4687844 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -113,6 +113,7 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner { opsRecovered++; appliedOperations.incrementAndGet(); } + engine.syncTranslog(); return opsRecovered; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 75a884ac760..2b08768af04 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -552,7 +552,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase public void executeRetentionLeasesSyncRequestOnReplica(RetentionLeaseSyncAction.Request request, IndexShard replica) { final PlainActionFuture<Releasable> acquirePermitFuture = new PlainActionFuture<>(); - replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getGlobalCheckpoint(), + replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getLastKnownGlobalCheckpoint(), getPrimary().getMaxSeqNoOfUpdatesOrDeletes(), acquirePermitFuture, ThreadPool.Names.SAME, request); try (Releasable ignored = acquirePermitFuture.actionGet()) { replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); @@ -659,7 +659,12 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override public long globalCheckpoint() { - return getPrimaryShard().getGlobalCheckpoint(); + return getPrimaryShard().getLastSyncedGlobalCheckpoint(); + } + + @Override + public long computedGlobalCheckpoint() { + return getPrimaryShard().getLastKnownGlobalCheckpoint(); } @Override @@ -693,7 +698,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase try { performOnReplica(request, replica); releasable.close(); - delegatedListener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint())); + delegatedListener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(), + replica.getLastKnownGlobalCheckpoint())); } catch (final Exception e) { Releasables.closeWhileHandlingException(releasable); delegatedListener.onFailure(e); @@ -756,7 +762,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected void performOnReplica(BulkShardRequest 
request, IndexShard replica) throws Exception { executeShardBulkOnReplica(request, replica, getPrimaryShard().getPendingPrimaryTerm(), - getPrimaryShard().getGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); + getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); } } @@ -827,7 +833,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica, long term) throws Exception { executeShardBulkOnReplica(request, replica, term, - group.primary.getGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); + group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); } /** @@ -835,7 +841,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase */ void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception { executeShardBulkOnReplica(request, replica, group.primary.getPendingPrimaryTerm(), - group.primary.getGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); + group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); } class GlobalCheckpointSync extends ReplicationAction< @@ -884,7 +890,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { executeResyncOnReplica(replica, request, getPrimaryShard().getPendingPrimaryTerm(), - getPrimaryShard().getGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); + getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index a14fbd1583f..4b5be292057 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -753,12 +753,14 @@ public abstract class IndexShardTestCase extends ESTestCase { result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); } + shard.sync(); // advance local checkpoint shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getLocalCheckpoint()); } else { final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1; shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates result = shard.applyIndexOperationOnReplica(seqNo, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + shard.sync(); // advance local checkpoint if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { throw new TransportReplicationAction.RetryOnReplicaException(shard.shardId, "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate()); @@ -777,11 +779,14 @@ public abstract class IndexShardTestCase extends ESTestCase { if (shard.routingEntry().primary()) { result = shard.applyDeleteOperationOnPrimary( Versions.MATCH_ANY, type, id, VersionType.INTERNAL, SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - 
shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getEngine().getLocalCheckpoint()); + shard.sync(); // advance local checkpoint + shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), + shard.getLocalCheckpoint()); } else { final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1; shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates result = shard.applyDeleteOperationOnReplica(seqNo, 0L, type, id); + shard.sync(); // advance local checkpoint } return result; } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 597c2a5ac84..5f8b48e70a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -86,6 +86,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; @@ -161,6 +162,7 @@ public abstract class AggregatorTestCase extends ESTestCase { when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); Map<String, MappedFieldType> fieldNameToType = new HashMap<>(); fieldNameToType.putAll(Arrays.stream(fieldTypes) + .filter(Objects::nonNull) .collect(Collectors.toMap(MappedFieldType::name, Function.identity()))); fieldNameToType.putAll(getFieldAliases(fieldTypes)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index c2e97f35fae..d46c09e1262 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -27,8 +27,10 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -141,7 +143,7 @@ public class BackgroundIndexer implements AutoCloseable { } } - BulkRequestBuilder bulkRequest = client.prepareBulk(); + BulkRequestBuilder bulkRequest = client.prepareBulk().setTimeout(timeout); for (int i = 0; i < batchSize; i++) { id = idGenerator.incrementAndGet(); if (useAutoGeneratedIDs) { @@ -151,16 +153,21 @@ .setSource(generateSource(id, threadRandom))); } } - BulkResponse bulkResponse = bulkRequest.get(); - for (BulkItemResponse bulkItemResponse : bulkResponse) { - if (bulkItemResponse.isFailed() == false) { - boolean add = ids.add(bulkItemResponse.getId()); - assert add : "ID: " + bulkItemResponse.getId() + " already used"; - } else { - failures.add(bulkItemResponse.getFailure().getCause()); + try { + BulkResponse bulkResponse = bulkRequest.get(); + for (BulkItemResponse bulkItemResponse : bulkResponse) { + if (bulkItemResponse.isFailed() == false) { + boolean add 
= ids.add(bulkItemResponse.getId()); + assert add : "ID: " + bulkItemResponse.getId() + " already used"; + } else { + failures.add(bulkItemResponse.getFailure().getCause()); + } + } + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; } } - } else { if (hasBudget.get() && !availableBudget.tryAcquire(250, TimeUnit.MILLISECONDS)) { @@ -169,15 +176,27 @@ public class BackgroundIndexer implements AutoCloseable { } id = idGenerator.incrementAndGet(); if (useAutoGeneratedIDs) { - IndexResponse indexResponse = client.prepareIndex(index, type) - .setSource(generateSource(id, threadRandom)).get(); - boolean add = ids.add(indexResponse.getId()); - assert add : "ID: " + indexResponse.getId() + " already used"; + try { + IndexResponse indexResponse = client.prepareIndex(index, type) + .setTimeout(timeout).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; + } + } } else { - IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)) - .setSource(generateSource(id, threadRandom)).get(); - boolean add = ids.add(indexResponse.getId()); - assert add : "ID: " + indexResponse.getId() + " already used"; + try { + IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)) + .setTimeout(timeout).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; + } + } } } } @@ -217,6 +236,18 @@ public class BackgroundIndexer implements AutoCloseable { } + private volatile TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; + + public void setRequestTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + private volatile boolean ignoreIndexingFailures; + + public void setIgnoreIndexingFailures(boolean ignoreIndexingFailures) { + this.ignoreIndexingFailures = ignoreIndexingFailures; + } + private void setBudget(int numOfDocs) { logger.debug("updating budget to [{}]", numOfDocs); if (numOfDocs >= 0) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index de0103e2b57..01ed946cab3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1441,7 +1441,7 @@ public final class InternalTestCluster extends TestCluster { } } } - }); + }, 30, TimeUnit.SECONDS); } /** @@ -1897,6 +1897,8 @@ public final class InternalTestCluster extends TestCluster { nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); } + callback.onAllNodesStopped(); + assert nodesByRoles.values().stream().mapToInt(List::size).sum() == nodeCount; // randomize start up order, but making sure that: @@ -2374,6 +2376,9 @@ public final class InternalTestCluster extends TestCluster { return Settings.EMPTY; } + public void onAllNodesStopped() throws Exception { + } + /** * Executed for each node before the {@code n + 1} node is restarted. The given client is * an active client to the node that will be restarted next. 
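A note on the pattern running through the test changes above: the extra trailing constructor argument `seqNo -> {}` is a no-op LongConsumer through which the translog reports each sequence number it has fsynced, and the accompanying renames (waitForProcessedOpsToComplete, markSeqNoAsProcessed, getProcessedCheckpoint) reflect the local checkpoint now being tracked in two flavors: processed (applied to the engine) and persisted (durable in the translog). The following Java sketch is illustrative only, a deliberately simplified stand-in for the real LocalCheckpointTracker; markSeqNoAsProcessed/getProcessedCheckpoint mirror names visible in the hunks, while markSeqNoAsPersisted is an assumed counterpart representing the notification the translog callback would deliver.

import java.util.TreeSet;

// Simplified model of the processed/persisted checkpoint split exercised by these tests.
// Not the actual org.elasticsearch.index.seqno.LocalCheckpointTracker.
class SimpleCheckpointTracker {
    private long processedCheckpoint = -1L; // -1 plays the role of SequenceNumbers.NO_OPS_PERFORMED
    private long persistedCheckpoint = -1L;
    private final TreeSet<Long> pendingProcessed = new TreeSet<>();
    private final TreeSet<Long> pendingPersisted = new TreeSet<>();

    // Called once an operation has been applied to the in-memory engine.
    synchronized void markSeqNoAsProcessed(long seqNo) {
        processedCheckpoint = advance(processedCheckpoint, seqNo, pendingProcessed);
    }

    // Assumed counterpart driven by the translog's persisted-seq-no callback
    // (the `seqNo -> {}` argument in the tests above is a no-op version of it).
    synchronized void markSeqNoAsPersisted(long seqNo) {
        persistedCheckpoint = advance(persistedCheckpoint, seqNo, pendingPersisted);
    }

    // A checkpoint only advances over a contiguous run of completed sequence numbers.
    private static long advance(long checkpoint, long seqNo, TreeSet<Long> pending) {
        pending.add(seqNo);
        while (pending.remove(checkpoint + 1)) {
            checkpoint++;
        }
        return checkpoint;
    }

    synchronized long getProcessedCheckpoint() { return processedCheckpoint; }
    synchronized long getPersistedCheckpoint() { return persistedCheckpoint; }

    public static void main(String[] args) {
        SimpleCheckpointTracker tracker = new SimpleCheckpointTracker();
        tracker.markSeqNoAsProcessed(0);
        tracker.markSeqNoAsProcessed(1);
        // Applied in memory but not yet fsynced: the persisted checkpoint lags behind.
        System.out.println(tracker.getProcessedCheckpoint()); // 1
        System.out.println(tracker.getPersistedCheckpoint()); // -1
        tracker.markSeqNoAsPersisted(0); // as if delivered by the translog callback
        tracker.markSeqNoAsPersisted(1); // after a sync/fsync
        System.out.println(tracker.getPersistedCheckpoint()); // 1
    }
}

Under this model the persisted checkpoint only catches up to the processed one after the fsync notification has run, which is why the hunks above add shard.sync() and engine.syncTranslog() calls before asserting on synced or last-known checkpoints.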
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java index c523aa15e58..eb39b1c16d0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java @@ -87,8 +87,14 @@ public abstract class DisruptableMockTransport extends MockTransport { @Override public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener<Connection> listener) { - final Optional<DisruptableMockTransport> matchingTransport = getDisruptableMockTransport(node.getAddress()); - if (matchingTransport.isPresent()) { + final Optional<DisruptableMockTransport> optionalMatchingTransport = getDisruptableMockTransport(node.getAddress()); + if (optionalMatchingTransport.isPresent()) { + final DisruptableMockTransport matchingTransport = optionalMatchingTransport.get(); + final ConnectionStatus connectionStatus = getConnectionStatus(matchingTransport.getLocalNode()); + if (connectionStatus != ConnectionStatus.CONNECTED) { + throw new ConnectTransportException(node, "node [" + node + "] is [" + connectionStatus + "] not [CONNECTED]"); + } + listener.onResponse(new CloseableConnection() { @Override public DiscoveryNode getNode() { @@ -98,12 +104,12 @@ public abstract class DisruptableMockTransport extends MockTransport { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws TransportException { - onSendRequest(requestId, action, request, matchingTransport.get()); + onSendRequest(requestId, action, request, matchingTransport); } }); return () -> {}; } else { - throw new ConnectTransportException(node, "node " + node + " does not exist"); + throw new ConnectTransportException(node, "node [" + node + "] does not exist"); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index bd260da169c..9a270ec722a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -84,7 +84,7 @@ public class MockNioTransport extends TcpTransport { PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); - this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool); + this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool, settings); } @Override @@ -330,21 +330,20 @@ public class MockNioTransport extends TcpTransport { } static final class TransportThreadWatchdog { - - private static final long WARN_THRESHOLD = TimeUnit.MILLISECONDS.toNanos(150); - // Only check every 2s to not flood the logs on a blocked thread. // We mostly care about long blocks and not random slowness anyway and in tests would randomly catch slow operations that block for // less than 2s eventually. 
private static final TimeValue CHECK_INTERVAL = TimeValue.timeValueSeconds(2); + private final long warnThreshold; private final ThreadPool threadPool; private final ConcurrentHashMap<Thread, Long> registry = new ConcurrentHashMap<>(); private volatile boolean stopped; - TransportThreadWatchdog(ThreadPool threadPool) { + TransportThreadWatchdog(ThreadPool threadPool, Settings settings) { this.threadPool = threadPool; + warnThreshold = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(settings).nanos() + TimeValue.timeValueMillis(100L).nanos(); threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); } @@ -361,7 +360,7 @@ private void maybeLogElapsedTime(long startTime) { long elapsedTime = threadPool.relativeTimeInNanos() - startTime; - if (elapsedTime > WARN_THRESHOLD) { + if (elapsedTime > warnThreshold) { logger.warn( new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", TimeUnit.NANOSECONDS.toMillis(elapsedTime)), @@ -372,9 +371,11 @@ private void logLongRunningExecutions() { for (Map.Entry<Thread, Long> entry : registry.entrySet()) { final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - entry.getValue(); - if (elapsedTimeInNanos > WARN_THRESHOLD) { + if (elapsedTimeInNanos > warnThreshold) { final Thread thread = entry.getKey(); - logger.warn("Potentially blocked execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + logger.warn("Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", + thread.getName(), + thread.getState(), TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java index 4060b7f5cd8..90a47d09e6c 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.disruption.DisruptableMockTransport.ConnectionStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -53,6 +54,7 @@ import java.util.function.Consumer; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; public class DisruptableMockTransportTests extends ESTestCase { @@ -399,4 +401,27 @@ public class DisruptableMockTransportTests extends ESTestCase { deterministicTaskQueue.runAllRunnableTasks(); assertTrue(responseHandlerCalled.get()); } + + public void testBrokenLinkFailsToConnect() { + service1.disconnectFromNode(node2); + + disconnectedLinks.add(Tuple.tuple(node1, node2)); + assertThat(expectThrows(ConnectTransportException.class, () -> service1.connectToNode(node2)).getMessage(), + endsWith("is [DISCONNECTED] not [CONNECTED]")); + disconnectedLinks.clear(); + 
blackholedLinks.add(Tuple.tuple(node1, node2)); + assertThat(expectThrows(ConnectTransportException.class, () -> service1.connectToNode(node2)).getMessage(), + endsWith("is [BLACK_HOLE] not [CONNECTED]")); + blackholedLinks.clear(); + + blackholedRequestLinks.add(Tuple.tuple(node1, node2)); + assertThat(expectThrows(ConnectTransportException.class, () -> service1.connectToNode(node2)).getMessage(), + endsWith("is [BLACK_HOLE_REQUESTS_ONLY] not [CONNECTED]")); + blackholedRequestLinks.clear(); + + final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT); + assertThat(expectThrows(ConnectTransportException.class, () -> service1.connectToNode(node3)).getMessage(), + endsWith("does not exist")); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java index 424d4922f02..d8bb790b2a3 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.test.ESTestCase; @@ -57,7 +58,7 @@ public class TestEventHandlerTests extends ESTestCase { public void testLogOnElapsedTime() throws Exception { long start = System.nanoTime(); - long end = start + TimeUnit.MILLISECONDS.toNanos(200); + long end = start + TimeUnit.MILLISECONDS.toNanos(400); AtomicBoolean isStart = new AtomicBoolean(true); LongSupplier timeSupplier = () -> { if (isStart.compareAndSet(true, false)) { @@ -70,7 +71,7 @@ public class TestEventHandlerTests extends ESTestCase { final ThreadPool threadPool = mock(ThreadPool.class); doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos(); TestEventHandler eventHandler = - new TestEventHandler((e) -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool)); + new TestEventHandler(e -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool, Settings.EMPTY)); ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); SocketChannelContext socketChannelContext = mock(SocketChannelContext.class); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java index 45ffbf6998d..5f8f1d5368a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -144,10 +144,11 @@ public class TransportBulkShardOperationsAction assert failure.getSeqNo() == targetOp.seqNo() : targetOp.seqNo() + " != " + failure.getSeqNo(); if (failure.getExistingPrimaryTerm().isPresent()) { appliedOperations.add(rewriteOperationWithPrimaryTerm(sourceOp, failure.getExistingPrimaryTerm().getAsLong())); - } else if (targetOp.seqNo() > primary.getGlobalCheckpoint()) { - assert false : "can't find primary_term for existing op=" 
+ targetOp + " gcp=" + primary.getGlobalCheckpoint(); + } else if (targetOp.seqNo() > primary.getLastKnownGlobalCheckpoint()) { + assert false : + "can't find primary_term for existing op=" + targetOp + " gcp=" + primary.getLastKnownGlobalCheckpoint(); throw new IllegalStateException("can't find primary_term for existing op=" + targetOp + - " global_checkpoint=" + primary.getGlobalCheckpoint(), failure); + " global_checkpoint=" + primary.getLastKnownGlobalCheckpoint(), failure); } } else { assert false : "Only already-processed error should happen; op=[" + targetOp + "] error=[" + result.getFailure() + "]"; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java index 619e0a04baf..8d4f0b219bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -90,7 +90,7 @@ public final class FollowingEngine extends InternalEngine { } else { return IndexingStrategy.processButSkipLucene(false, index.version()); } - } else if (maxSeqNoOfUpdatesOrDeletes <= getLocalCheckpoint()) { + } else if (maxSeqNoOfUpdatesOrDeletes <= getProcessedLocalCheckpoint()) { assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : "seq_no[" + index.seqNo() + "] <= msu[" + maxSeqNoOfUpdatesOrDeletes + "]"; numOfOptimizedIndexing.inc(); return InternalEngine.IndexingStrategy.optimizedAppendOnly(index.version()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 2fedacabc93..a6406df0fbe 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -491,10 +491,10 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit while (offset < fileLength && error.get() == null) { final long requestSeqId = requestSeqIdTracker.generateSeqNo(); try { - requestSeqIdTracker.waitForOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); + requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); if (error.get() != null) { - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); break; } @@ -514,7 +514,7 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit @Override public void onFailure(Exception e) { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } @Override @@ -526,24 +526,24 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit throttleListener.accept(nanosPaused); final boolean lastChunk = r.getOffset() + actualChunkSize >= fileLength; multiFileWriter.writeFileChunk(fileInfo.metadata(), r.getOffset(), r.getChunk(), lastChunk); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } }), e -> { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + 
requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } ), timeout, ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME); remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request, listener); } catch (Exception e) { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } } } try { - requestSeqIdTracker.waitForOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); + requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new ElasticsearchException(e); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index dac8764d48b..f3cbc731de1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -284,7 +284,7 @@ public class FollowerFailOverIT extends CcrIntegTestCase { IndexResponse indexResp = leaderCluster.client().prepareIndex("leader-index", "doc", "1") .setSource("{\"balance\": 100}", XContentType.JSON).setTimeout(TimeValue.ZERO).get(); assertThat(indexResp.getResult(), equalTo(DocWriteResponse.Result.CREATED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(0L)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(0L)); // Make sure at least one read-request which requires mapping sync is completed. assertBusy(() -> { CcrClient ccrClient = new CcrClient(followerClient()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index e8b21f05c5c..9f6850fe20f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -67,7 +67,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { int size = max - min + 1; final Translog.Operation[] operations = ShardChangesAction.getOperations( indexShard, - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), min, size, indexShard.getHistoryUUID(), @@ -83,7 +83,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { IllegalStateException.class, () -> ShardChangesAction.getOperations( indexShard, - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), numWrites, numWrites + 1, indexShard.getHistoryUUID(), @@ -92,18 +92,19 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { Locale.ROOT, "not exposing operations from [%d] greater than the global checkpoint [%d]", numWrites, - indexShard.getGlobalCheckpoint()); + indexShard.getLastKnownGlobalCheckpoint()); assertThat(e, hasToString(containsString(message))); } // get operations for a range some operations do not exist: - Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), numWrites - 10, numWrites + 10, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); 
assertThat(operations.length, equalTo(10)); // Unexpected history UUID: Exception e = expectThrows(IllegalStateException.class, () -> ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), 0, 10, "different-history-uuid", new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); + indexShard.getLastKnownGlobalCheckpoint(), 0, 10, "different-history-uuid", + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); assertThat(e.getMessage(), equalTo("unexpected history uuid, expected [different-history-uuid], actual [" + indexShard.getHistoryUUID() + "]")); @@ -112,7 +113,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final long fromSeqNo = randomLongBetween(Long.MIN_VALUE, -1); final int batchSize = randomIntBetween(0, Integer.MAX_VALUE); final IllegalArgumentException invalidRangeError = expectThrows(IllegalArgumentException.class, - () -> ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + () -> ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), fromSeqNo, batchSize, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); assertThat(invalidRangeError.getMessage(), equalTo("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + (fromSeqNo + batchSize - 1) + "]")); @@ -125,7 +126,8 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { ShardRouting shardRouting = TestShardRouting.newShardRouting("index", 0, "_node_id", true, ShardRoutingState.INITIALIZING); Mockito.when(indexShard.routingEntry()).thenReturn(shardRouting); expectThrows(IndexShardNotStartedException.class, () -> ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); + indexShard.getLastKnownGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); } public void testGetOperationsExceedByteLimit() throws Exception { @@ -142,7 +144,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { } final IndexShard indexShard = indexService.getShard(0); - final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), 0, 12, indexShard.getHistoryUUID(), new ByteSizeValue(256, ByteSizeUnit.BYTES)); assertThat(operations.length, equalTo(12)); assertThat(operations[0].seqNo(), equalTo(0L)); @@ -172,7 +174,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final IndexShard indexShard = indexService.getShard(0); final Translog.Operation[] operations = ShardChangesAction.getOperations( - indexShard, indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), ByteSizeValue.ZERO); + indexShard, indexShard.getLastKnownGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), ByteSizeValue.ZERO); assertThat(operations.length, equalTo(1)); assertThat(operations[0].seqNo(), equalTo(0L)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index d2b424dc66f..21d5d3547b5 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -139,13 +139,13 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { Consumer handler, Consumer errorHandler) { for(Translog.Operation op : operations) { - tracker.markSeqNoAsCompleted(op.seqNo()); + tracker.markSeqNoAsProcessed(op.seqNo()); } receivedOperations.addAll(operations); // Emulate network thread and avoid SO: final BulkShardOperationsResponse response = new BulkShardOperationsResponse(); - response.setGlobalCheckpoint(tracker.getCheckpoint()); + response.setGlobalCheckpoint(tracker.getProcessedCheckpoint()); response.setMaxSeqNo(tracker.getMaxSeqNo()); threadPool.generic().execute(() -> handler.accept(response)); } @@ -180,7 +180,7 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { } } else { assert from >= testRun.finalExpectedGlobalCheckpoint; - final long globalCheckpoint = tracker.getCheckpoint(); + final long globalCheckpoint = tracker.getProcessedCheckpoint(); final long maxSeqNo = tracker.getMaxSeqNo(); handler.accept(new ShardChangesAction.Response( 0L, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 9da7e1522d2..f88b6542392 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -105,7 +105,8 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest leaderGroup.assertAllEqual(docCount); Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); for (IndexShard shard : followerGroup) { @@ -119,7 +120,8 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest } leaderGroup.syncGlobalCheckpoint(); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); }); shardFollowTask.markAsCompleted(); @@ -192,7 +194,8 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest leaderGroup.assertAllEqual(docCount); Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); @@ -235,7 +238,8 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest leaderGroup.assertAllEqual(docCount); Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), 
equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); @@ -282,11 +286,12 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest // Simulates some bulk requests are completed on the primary and replicated to some (but not all) replicas of the follower // but the primary of the follower crashed before these requests completed. for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { - long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); - long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); + long fromSeqNo = randomLongBetween(0, leadingPrimary.getLastKnownGlobalCheckpoint()); + long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getLastKnownGlobalCheckpoint()); int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); - Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), - fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); + Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, + leadingPrimary.getLastKnownGlobalCheckpoint(), fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); IndexShard followingPrimary = followerGroup.getPrimary(); TransportWriteAction.WritePrimaryResult primaryResult = @@ -296,7 +301,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { final PlainActionFuture permitFuture = new PlainActionFuture<>(); replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), - followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followingPrimary.getLastKnownGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), permitFuture, ThreadPool.Names.SAME, primaryResult); try (Releasable ignored = permitFuture.get()) { TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); @@ -308,13 +313,14 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), - leadingPrimary.getGlobalCheckpoint(), + leadingPrimary.getLastKnownGlobalCheckpoint(), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); try { assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leadingPrimary.getLastKnownGlobalCheckpoint())); assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); }); } finally { @@ -380,9 +386,9 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest ShardFollowNodeTask followTask = createShardFollowTask(leader, follower); followTask.start( follower.getPrimary().getHistoryUUID(), - leader.getPrimary().getGlobalCheckpoint(), +
leader.getPrimary().getLastKnownGlobalCheckpoint(), leader.getPrimary().seqNoStats().getMaxSeqNo(), - follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().getLastKnownGlobalCheckpoint(), follower.getPrimary().seqNoStats().getMaxSeqNo() ); leader.appendDocs(between(0, 100)); @@ -403,9 +409,9 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest final ShardFollowNodeTask task = createShardFollowTask(leader, follower); task.start( follower.getPrimary().getHistoryUUID(), - leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().getLastKnownGlobalCheckpoint(), leader.getPrimary().seqNoStats().getMaxSeqNo(), - follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().getLastKnownGlobalCheckpoint(), follower.getPrimary().seqNoStats().getMaxSeqNo()); final Scheduler.Cancellable renewable = task.getRenewable(); assertNotNull(renewable); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java index 856b6da2f9d..43302a5177e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -142,7 +142,7 @@ public class BulkShardOperationsTests extends IndexShardTestCase { newPrimary.getHistoryUUID(), Stream.concat(secondBulk.stream(), existingOps.stream()).collect(Collectors.toList()), seqno, newPrimary, logger); final long newPrimaryTerm = newPrimary.getOperationPrimaryTerm(); - final long globalCheckpoint = newPrimary.getGlobalCheckpoint(); + final long globalCheckpoint = newPrimary.getLastKnownGlobalCheckpoint(); final List appliedOperations = Stream.concat( secondBulk.stream().map(op -> rewriteOperationWithPrimaryTerm(op, newPrimaryTerm)), existingOps.stream().filter(op -> op.seqNo() > globalCheckpoint).map(op -> rewriteOperationWithPrimaryTerm(op, oldPrimaryTerm)) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 4a56d6370eb..98bfa1b2068 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -337,7 +337,7 @@ public class FollowingEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { leader.index(indexForPrimary(Integer.toString(i))); } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -350,7 +350,7 @@ public class FollowingEngineTests extends ESTestCase { leader.delete(deleteForPrimary(Integer.toString(i))); } } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); 
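As the rename suggests, getProcessedLocalCheckpoint() is the highest sequence number up to which every operation has been applied to the engine (persistence to the translog may still lag behind it), which is the bound a follower replaying the leader's history must catch up to. Condensed, the recurring FollowingEngineTests pattern in these hunks, a sketch using only calls that already appear above:

    // apply a batch on the leader, wait until the follower has processed every
    // op up to the leader's processed local checkpoint, then compare doc ids
    EngineTestCase.concurrentlyApplyOps(ops, leader);
    EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint());
    assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true)));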
assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -362,7 +362,7 @@ public class FollowingEngineTests extends ESTestCase { docIds.add(docId); leader.index(indexForPrimary(docId)); } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs + moreDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -378,7 +378,7 @@ public class FollowingEngineTests extends ESTestCase { runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo((long) numOps)); }); } @@ -396,13 +396,13 @@ public class FollowingEngineTests extends ESTestCase { Randomness.shuffle(ops); runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); final List appendOps = new ArrayList<>(); for (int numAppends = scaledRandomIntBetween(0, 100), i = 0; i < numAppends; i++) { appendOps.add(indexForPrimary("append-" + i)); } EngineTestCase.concurrentlyApplyOps(appendOps, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), greaterThanOrEqualTo((long) appendOps.size())); }); } @@ -410,19 +410,19 @@ public class FollowingEngineTests extends ESTestCase { public void testOptimizeSingleDocSequentially() throws Exception { runFollowTest((leader, follower) -> { leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.delete(deleteForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); }); } @@ -432,20 +432,20 @@ public class FollowingEngineTests extends ESTestCase { Randomness.shuffle(ops); runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, 
leader.getProcessedLocalCheckpoint()); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); long numOptimized = follower.getNumberOfOptimizedIndexing(); leader.delete(deleteForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); }); } @@ -454,7 +454,7 @@ public class FollowingEngineTests extends ESTestCase { final CheckedBiConsumer wrappedTask = (leader, follower) -> { Thread[] threads = new Thread[between(1, 8)]; AtomicBoolean taskIsCompleted = new AtomicBoolean(); - AtomicLong lastFetchedSeqNo = new AtomicLong(follower.getLocalCheckpoint()); + AtomicLong lastFetchedSeqNo = new AtomicLong(follower.getProcessedLocalCheckpoint()); CountDownLatch latch = new CountDownLatch(threads.length + 1); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { @@ -472,7 +472,7 @@ public class FollowingEngineTests extends ESTestCase { latch.countDown(); latch.await(); task.accept(leader, follower); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); } finally { taskIsCompleted.set(true); for (Thread thread : threads) { @@ -516,7 +516,7 @@ public class FollowingEngineTests extends ESTestCase { final MapperService mapperService = EngineTestCase.createMapperService("test"); final TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), follower.config().getIndexSettings()); while (stopped.get() == false) { - final long checkpoint = leader.getLocalCheckpoint(); + final long checkpoint = leader.getProcessedLocalCheckpoint(); final long lastSeqNo = lastFetchedSeqNo.get(); if (lastSeqNo < checkpoint) { final long nextSeqNo = randomLongBetween(lastSeqNo + 1, checkpoint); @@ -607,7 +607,7 @@ public class FollowingEngineTests extends ESTestCase { } } // Primary should reject duplicates - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), followingEngine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), followingEngine.getProcessedLocalCheckpoint())); final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE); for (Engine.Operation op : operations) { Engine.Result result = applyOperation(followingEngine, op, newTerm, Engine.Operation.Origin.PRIMARY); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java index ee35fe3d21e..19d4d6ab6ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms; +import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -14,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -21,8 +23,10 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.dataframe.utils.TimeUtils; import java.io.IOException; +import java.time.Instant; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -42,6 +46,8 @@ public class DataFrameTransformConfig extends AbstractDiffable STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); private static final int MAX_DESCRIPTION_LENGTH = 1_000; @@ -53,9 +59,17 @@ public class DataFrameTransformConfig extends AbstractDiffable headers; + private Version transformVersion; + private Instant createTime; private final PivotConfig pivotConfig; + private static void validateStrictParsingParams(Object arg, String parameterName) { + if (arg != null) { + throw new IllegalArgumentException("Found [" + parameterName + "], not allowed for strict parsing"); + } + } + private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args, optionalId) -> { @@ -74,9 +88,11 @@ public class DataFrameTransformConfig extends AbstractDiffable p.mapStrings(), HEADERS); parser.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p, lenient), PIVOT_TRANSFORM); parser.declareString(optionalConstructorArg(), DESCRIPTION); - + parser.declareField(optionalConstructorArg(), + p -> TimeUtils.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), CREATE_TIME, ObjectParser.ValueType.VALUE); + parser.declareString(optionalConstructorArg(), VERSION); return parser; } @@ -103,12 +128,14 @@ public class DataFrameTransformConfig extends AbstractDiffable headers, - final PivotConfig pivotConfig, - final String description) { + DataFrameTransformConfig(final String id, + final SourceConfig source, + final DestConfig dest, + final Map headers, + final PivotConfig pivotConfig, + final String description, + final Instant createTime, + final String version){ this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.SOURCE.getPreferredName()); this.dest = ExceptionsHelper.requireNonNull(dest, DataFrameField.DESTINATION.getPreferredName()); @@ -123,6 +150,17 @@ public class DataFrameTransformConfig extends AbstractDiffable MAX_DESCRIPTION_LENGTH) { throw new IllegalArgumentException("[description] must be less 
than 1000 characters in length."); } + this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli()); + this.transformVersion = version == null ? null : Version.fromString(version); + } + + public DataFrameTransformConfig(final String id, + final SourceConfig source, + final DestConfig dest, + final Map headers, + final PivotConfig pivotConfig, + final String description) { + this(id, source, dest, headers, pivotConfig, description, null, null); } public DataFrameTransformConfig(final StreamInput in) throws IOException { @@ -132,6 +170,13 @@ public class DataFrameTransformConfig extends AbstractDiffable headers) { + public DataFrameTransformConfig setHeaders(Map headers) { this.headers = headers; + return this; + } + + public Version getVersion() { + return transformVersion; + } + + public DataFrameTransformConfig setVersion(Version transformVersion) { + this.transformVersion = transformVersion; + return this; + } + + public Instant getCreateTime() { + return createTime; + } + + public DataFrameTransformConfig setCreateTime(Instant createTime) { + ExceptionsHelper.requireNonNull(createTime, CREATE_TIME.getPreferredName()); + this.createTime = Instant.ofEpochMilli(createTime.toEpochMilli()); + return this; } public PivotConfig getPivotConfig() { @@ -179,6 +244,15 @@ public class DataFrameTransformConfig extends AbstractDiffable listener) { client.execute(PutUserAction.INSTANCE, request, listener); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java index 55caed43411..5c2c0304291 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.support; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; @@ -81,10 +82,10 @@ public final class Validation { return null; } - public static Error validatePassword(char[] password) { - return password.length >= MIN_PASSWD_LENGTH ? - null : - new Error("passwords must be at least [" + MIN_PASSWD_LENGTH + "] characters long"); + public static Error validatePassword(SecureString password) { + return password.length() >= MIN_PASSWD_LENGTH ? 
+ null : + new Error("passwords must be at least [" + MIN_PASSWD_LENGTH + "] characters long"); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java index 4cf21cfbecd..b03631550be 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java @@ -188,7 +188,7 @@ public class FrozenEngineTests extends EngineTestCase { if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getProcessedLocalCheckpoint()); } engine.syncTranslog(); return numDocsAdded; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 460b075c22b..dd99692df8f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -371,7 +371,7 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { final Index index = client().admin().cluster().prepareState().get().getState().metaData().index(indexName).getIndex(); final IndexService indexService = indicesService.indexService(index); assertThat(indexService.hasShard(0), is(true)); - assertThat(indexService.getShard(0).getGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); + assertThat(indexService.getShard(0).getLastKnownGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); }); assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java index a735b5a02ac..907c8eb98e6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -18,6 +19,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; import org.junit.Before; import java.io.IOException; +import java.time.Instant; import java.util.HashMap; import java.util.Map; @@ -41,13 +43,25 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT } public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHeaders(String id) { - return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), null, - PivotConfigTests.randomPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); + return new DataFrameTransformConfig(id, + randomSourceConfig(), + randomDestConfig(), + null, + PivotConfigTests.randomPivotConfig(), + randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 1000), + null, + null); } public static DataFrameTransformConfig randomDataFrameTransformConfig(String id) { - return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), randomHeaders(), - PivotConfigTests.randomPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); + return new DataFrameTransformConfig(id, + randomSourceConfig(), + randomDestConfig(), + randomHeaders(), + PivotConfigTests.randomPivotConfig(), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), + randomBoolean() ? null : Instant.now(), + randomBoolean() ? null : Version.CURRENT.toString()); } public static DataFrameTransformConfig randomInvalidDataFrameTransformConfig() { @@ -147,6 +161,48 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT () -> createDataFrameTransformConfigFromString(pivotTransform, "test_header_injection")); } + public void testPreventCreateTimeInjection() throws IOException { + String pivotTransform = "{" + + " \"create_time\" : " + Instant.now().toEpochMilli() + "," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + + " \"pivot\" : {" + + " \"group_by\": {" + + " \"id\": {" + + " \"terms\": {" + + " \"field\": \"id\"" + + "} } }," + + " \"aggs\": {" + + " \"avg\": {" + + " \"avg\": {" + + " \"field\": \"points\"" + + "} } } } }"; + + expectThrows(IllegalArgumentException.class, + () -> createDataFrameTransformConfigFromString(pivotTransform, "test_createTime_injection")); + } + + public void testPreventVersionInjection() throws IOException { + String pivotTransform = "{" + + " \"version\" : \"7.3.0\"," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + + " \"pivot\" : {" + + " \"group_by\": {" + + " \"id\": {" + + " \"terms\": {" + + " \"field\": \"id\"" + + "} } }," + + " \"aggs\": {" + + " \"avg\": {" + + " \"avg\": {" + + " \"field\": \"points\"" + + "} } } } }"; + + expectThrows(IllegalArgumentException.class, + () -> createDataFrameTransformConfigFromString(pivotTransform, "test_version_injection")); + } + public void testXContentForInternalStorage() throws IOException { DataFrameTransformConfig dataFrameTransformConfig = randomDataFrameTransformConfig(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index 7900eaba4c8..b9eb0241d9a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -19,12 +19,14 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.misc.SweetSpotSimilarity; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; import
org.apache.lucene.search.Weight; @@ -198,11 +200,25 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase { }); DirectoryReader directoryReader = DocumentSubsetReader.wrap(esIn, bitsetFilterCache, new MatchAllDocsQuery()); IndexSearcher indexSearcher = new IndexSearcher(directoryReader); + indexSearcher.setSimilarity(new SweetSpotSimilarity()); + indexSearcher.setQueryCachingPolicy(new QueryCachingPolicy() { + @Override + public void onUse(Query query) { + } + + @Override + public boolean shouldCache(Query query) { + return false; + } + }); + indexSearcher.setQueryCache((weight, policy) -> weight); securityIndexSearcherWrapper = new SecurityIndexSearcherWrapper(null, null, threadContext, licenseState, scriptService); IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); assertThat(result, not(sameInstance(indexSearcher))); assertThat(result.getSimilarity(), sameInstance(indexSearcher.getSimilarity())); + assertThat(result.getQueryCachingPolicy(), sameInstance(indexSearcher.getQueryCachingPolicy())); + assertThat(result.getQueryCache(), sameInstance(indexSearcher.getQueryCache())); bitsetFilterCache.close(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/ValidationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/ValidationTests.java index 458280677a4..b6226ec5c18 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/ValidationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/ValidationTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.support; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; @@ -59,12 +60,12 @@ public class ValidationTests extends ESTestCase { } public void testUsersValidatePassword() throws Exception { - String passwd = randomAlphaOfLength(randomIntBetween(0, 20)); + SecureString passwd = new SecureString(randomAlphaOfLength(randomIntBetween(0, 20)).toCharArray()); logger.info("{}[{}]", passwd, passwd.length()); if (passwd.length() >= 6) { - assertThat(Users.validatePassword(passwd.toCharArray()), nullValue()); + assertThat(Users.validatePassword(passwd), nullValue()); } else { - assertThat(Users.validatePassword(passwd.toCharArray()), notNullValue()); + assertThat(Users.validatePassword(passwd), notNullValue()); } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 805252b465b..103ea2e9100 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -15,6 +15,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import 
org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; @@ -57,6 +59,7 @@ import java.time.ZoneId; import java.util.Base64; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -118,6 +121,11 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { return restClient.dataFrame().getDataFrameTransformStats(new GetDataFrameTransformStatsRequest(id), RequestOptions.DEFAULT); } + protected GetDataFrameTransformResponse getDataFrameTransform(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().getDataFrameTransform(new GetDataFrameTransformRequest(id), RequestOptions.DEFAULT); + } + protected void waitUntilCheckpoint(String id, long checkpoint) throws Exception { waitUntilCheckpoint(id, checkpoint, TimeValue.timeValueSeconds(30)); } @@ -321,9 +329,11 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { .build(); } - private class TestRestHighLevelClient extends RestHighLevelClient { + private static class TestRestHighLevelClient extends RestHighLevelClient { + private static final List X_CONTENT_ENTRIES = + new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents(); TestRestHighLevelClient() { - super(client(), restClient -> {}, Collections.emptyList()); + super(client(), restClient -> {}, X_CONTENT_ENTRIES); } } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index c4c5ca3c130..174a956eb3c 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.elasticsearch.Version; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; @@ -17,6 +18,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.junit.After; import java.io.IOException; +import java.time.Instant; import java.util.HashMap; import java.util.Map; @@ -58,6 +60,11 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { assertThat(getDataFrameTransformStats(config.getId()).getTransformsStateAndStats().get(0).getTransformState().getIndexerState(), equalTo(IndexerState.STOPPED))); stopDataFrameTransform(config.getId()); + + DataFrameTransformConfig storedConfig = getDataFrameTransform(config.getId()).getTransformConfigurations().get(0); + assertThat(storedConfig.getVersion(), equalTo(Version.CURRENT)); + Instant now = Instant.now(); + assertTrue("[create_time] is not before current time", storedConfig.getCreateTime().isBefore(now)); deleteDataFrameTransform(config.getId()); } diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index 049b0804f45..36023c0f737 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.dataframe.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; @@ -51,6 +52,7 @@ import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigMa import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -110,8 +112,10 @@ public class TransportPutDataFrameTransformAction .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - DataFrameTransformConfig config = request.getConfig(); - config.setHeaders(filteredHeaders); + DataFrameTransformConfig config = request.getConfig() + .setHeaders(filteredHeaders) + .setCreateTime(Instant.now()) + .setVersion(Version.CURRENT); String transformId = config.getId(); // quick check whether a transform has already been created under that name diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java index e05263014d3..8a257baa3d6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java @@ -38,12 +38,6 @@ public class NetworkDisruptionIT extends BaseMlIntegTestCase { return plugins; } - // Remove this once the AwaitsFix below has been resolved - public void testDummy() { - assertTrue(true); - } - - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39858") public void testJobRelocation() throws Exception { internalCluster().ensureAtLeastNumDataNodes(5); ensureStableCluster(5); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 27aada5cdef..a6d0b11f1c2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -353,7 +353,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase { protected String awaitJobOpenedAndAssigned(String jobId, String queryNode) throws Exception { PersistentTasksClusterService persistentTasksClusterService = - internalCluster().getInstance(PersistentTasksClusterService.class, internalCluster().getMasterName()); + internalCluster().getInstance(PersistentTasksClusterService.class, internalCluster().getMasterName(queryNode)); // Speed up 
rechecks to a rate that is quicker than what settings would allow. // The check would work eventually without doing this, but the assertBusy() below // would need to wait 30 seconds, which would make the test run very slowly. diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 5926cdbc01c..866f3722e6e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -193,7 +193,7 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { // loop for two consecutive good passwords while (true) { SecureString password1 = new SecureString(terminal.readSecret("Enter password for [" + user + "]: ")); - Validation.Error err = Validation.Users.validatePassword(password1.getChars()); + Validation.Error err = Validation.Users.validatePassword(password1); if (err != null) { terminal.println(err.toString()); terminal.println("Try again."); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index 6007ef5fd6d..6d51fc5df93 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.authc.file.tool; import joptsimple.OptionSet; import joptsimple.OptionSpec; - import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.LoggingAwareMultiCommand; @@ -111,7 +110,7 @@ public class UsersTool extends LoggingAwareMultiCommand { throw new UserException(ExitCodes.DATA_ERROR, "Invalid username [" + username + "]... 
" + validationError); } - char[] password = parsePassword(terminal, passwordOption.value(options)); + final char[] passwordHash = getPasswordHash(terminal, env, passwordOption.value(options)); String[] roles = parseRoles(terminal, env, rolesOption.value(options)); Path passwordFile = FileUserPasswdStore.resolveFile(env); @@ -125,9 +124,8 @@ public class UsersTool extends LoggingAwareMultiCommand { if (users.containsKey(username)) { throw new UserException(ExitCodes.CODE_ERROR, "User [" + username + "] already exists"); } - final Hasher hasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(env.settings())); users = new HashMap<>(users); // make modifiable - users.put(username, hasher.hash(new SecureString(password))); + users.put(username, passwordHash); FileUserPasswdStore.writeFile(users, passwordFile); if (roles.length > 0) { @@ -218,7 +216,7 @@ public class UsersTool extends LoggingAwareMultiCommand { protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { String username = parseUsername(arguments.values(options), env.settings()); - char[] password = parsePassword(terminal, passwordOption.value(options)); + char[] passwordHash = getPasswordHash(terminal, env, passwordOption.value(options)); Path file = FileUserPasswdStore.resolveFile(env); FileAttributesChecker attributesChecker = new FileAttributesChecker(file); @@ -229,9 +227,8 @@ public class UsersTool extends LoggingAwareMultiCommand { if (users.containsKey(username) == false) { throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); } - final Hasher hasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(env.settings())); users = new HashMap<>(users); // make modifiable - users.put(username, hasher.hash(new SecureString(password))); + users.put(username, passwordHash); FileUserPasswdStore.writeFile(users, file); attributesChecker.check(terminal); @@ -440,23 +437,32 @@ public class UsersTool extends LoggingAwareMultiCommand { return username; } + private static char[] getPasswordHash(Terminal terminal, Environment env, String cliPasswordValue) throws UserException { + final Hasher hasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(env.settings())); + final char[] passwordHash; + try (SecureString password = parsePassword(terminal, cliPasswordValue)) { + passwordHash = hasher.hash(password); + } + return passwordHash; + } + // pkg private for testing - static char[] parsePassword(Terminal terminal, String passwordStr) throws UserException { - char[] password; + static SecureString parsePassword(Terminal terminal, String passwordStr) throws UserException { + SecureString password; if (passwordStr != null) { - password = passwordStr.toCharArray(); + password = new SecureString(passwordStr.toCharArray()); Validation.Error validationError = Users.validatePassword(password); if (validationError != null) { throw new UserException(ExitCodes.DATA_ERROR, "Invalid password..." + validationError); } } else { - password = terminal.readSecret("Enter new password: "); + password = new SecureString(terminal.readSecret("Enter new password: ")); Validation.Error validationError = Users.validatePassword(password); if (validationError != null) { throw new UserException(ExitCodes.DATA_ERROR, "Invalid password..." 
+ validationError); } char[] retyped = terminal.readSecret("Retype new password: "); - if (Arrays.equals(password, retyped) == false) { + if (Arrays.equals(password.getChars(), retyped) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Password mismatch"); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 9e7371f95ed..5b12df0584f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -124,7 +124,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testDeletingNonexistingUserAndRole() throws Exception { SecurityClient c = securityClient(); // first create the index so it exists - c.preparePutUser("joe", "s3kirt".toCharArray(), hasher, "role1", "user").get(); + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "role1", "user").get(); DeleteUserResponse resp = c.prepareDeleteUser("missing").get(); assertFalse("user shouldn't be found", resp.found()); DeleteRoleResponse resp2 = c.prepareDeleteRole("role").get(); @@ -144,7 +144,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { final List existingUsers = Arrays.asList(c.prepareGetUsers().get().users()); final int existing = existingUsers.size(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3kirt".toCharArray(), hasher, "role1", "user").get(); + c.preparePutUser("joe", new SecureString("s3kirt".toCharArray()), hasher, "role1", "user").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); @@ -155,8 +155,8 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertArrayEquals(joe.roles(), new String[]{"role1", "user"}); logger.info("--> adding two more users"); - c.preparePutUser("joe2", "s3kirt2".toCharArray(), hasher, "role2", "user").get(); - c.preparePutUser("joe3", "s3kirt3".toCharArray(), hasher, "role3", "user").get(); + c.preparePutUser("joe2", new SecureString("s3kirt2".toCharArray()), hasher, "role2", "user").get(); + c.preparePutUser("joe3", new SecureString("s3kirt3".toCharArray()), hasher, "role3", "user").get(); GetUsersResponse allUsersResp = c.prepareGetUsers().get(); assertTrue("users should exist", allUsersResp.hasUsers()); assertEquals("should be " + (3 + existing) + " users total", 3 + existing, allUsersResp.users().length); @@ -250,7 +250,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); @@ -262,7 +262,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { // Index a document with the default test user client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); - String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + String token = 
basicAuthHeaderValue("joe", new SecureString("s3krit")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); assertEquals(1L, searchResp.getHits().getTotalHits().value); @@ -271,7 +271,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testUpdatingUserAndAuthentication() throws Exception { SecurityClient c = securityClient(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> retrieving user"); @@ -283,12 +283,12 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { ensureGreen("idx"); // Index a document with the default test user client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); - String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); assertEquals(1L, searchResp.getHits().getTotalHits().value); - c.preparePutUser("joe", "s3krit2".toCharArray(), hasher, SecuritySettingsSource.TEST_ROLE).get(); + c.preparePutUser("joe", new SecureString("s3krit2".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); try { client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -298,7 +298,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); } - token = basicAuthHeaderValue("joe", new SecureString("s3krit2".toCharArray())); + token = basicAuthHeaderValue("joe", new SecureString("s3krit2")); searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); assertEquals(1L, searchResp.getHits().getTotalHits().value); } @@ -306,7 +306,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testCreateDeleteAuthenticate() { SecurityClient c = securityClient(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); @@ -319,7 +319,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { ensureGreen("idx"); // Index a document with the default test user client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); - String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); assertEquals(1L, searchResp.getHits().getTotalHits().value); @@ -345,12 +345,12 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); - c.preparePutUser("joe", "s3krit".toCharArray(), 
hasher, "test_role").get(); + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); if (authenticate) { - final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareHealth().get(); assertFalse(response.isTimedOut()); @@ -394,7 +394,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { .addIndices(new String[]{"*"}, new String[]{"create_index"}, null, null, null, true) .get(); logger.error("--> creating user"); - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role", "snapshot_user").get(); + securityClient().preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "test_role", "snapshot_user").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> creating repository"); @@ -404,7 +404,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); // joe can snapshot all indices, including '.security' SnapshotInfo snapshotInfo = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareCreateSnapshot("test-repo", "test-snap-1") @@ -458,11 +458,11 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); - c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); + c.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "test_role").get(); logger.error("--> waiting for .security index"); ensureGreen(SECURITY_MAIN_ALIAS); - final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() .prepareHealth().get(); assertFalse(response.isTimedOut()); @@ -492,7 +492,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); // check that putting a user without a password fails if the user doesn't exist try { - client.preparePutUser("joe", null, hasher, "admin_role").get(); + client.preparePutUser("joe", (SecureString) null, hasher, "admin_role").get(); fail("cannot create a user without a password"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("password must be specified")); @@ -501,8 +501,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); // create joe with a password and verify the user works - client.preparePutUser("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), - hasher, 
"admin_role").get(); + client.preparePutUser("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING, hasher, "admin_role").get(); assertThat(client.prepareGetUsers("joe").get().hasUsers(), is(true)); final String token = basicAuthHeaderValue("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() @@ -510,7 +509,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertFalse(response.isTimedOut()); // modify joe without sending the password - client.preparePutUser("joe", null, hasher, "read_role").fullName("Joe Smith").get(); + client.preparePutUser("joe", (SecureString) null, hasher, "read_role").fullName("Joe Smith").get(); GetUsersResponse getUsersResponse = client.prepareGetUsers("joe").get(); assertThat(getUsersResponse.hasUsers(), is(true)); assertThat(getUsersResponse.users().length, is(1)); @@ -531,7 +530,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { // update the user with password and admin role again String secondPassword = SecuritySettingsSourceField.TEST_PASSWORD + "2"; - client.preparePutUser("joe", secondPassword.toCharArray(), hasher, "admin_role"). + client.preparePutUser("joe", new SecureString(secondPassword.toCharArray()), hasher, "admin_role"). fullName("Joe Smith").get(); getUsersResponse = client.prepareGetUsers("joe").get(); assertThat(getUsersResponse.hasUsers(), is(true)); @@ -560,8 +559,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testCannotCreateUserWithShortPassword() throws Exception { SecurityClient client = securityClient(); try { - client.preparePutUser("joe", randomAlphaOfLengthBetween(0, 5).toCharArray(), hasher, - "admin_role").get(); + client.preparePutUser("joe", new SecureString(randomAlphaOfLengthBetween(0, 5).toCharArray()), hasher, "admin_role").get(); fail("cannot create a user without a password < 6 characters"); } catch (IllegalArgumentException v) { assertThat(v.getMessage().contains("password"), is(true)); @@ -571,8 +569,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testCannotCreateUserWithInvalidCharactersInName() throws Exception { SecurityClient client = securityClient(); IllegalArgumentException v = expectThrows(IllegalArgumentException.class, - () -> client.preparePutUser("fóóbár", "my-am@zing-password".toCharArray(), hasher, - "admin_role").get() + () -> client.preparePutUser("fóóbár", new SecureString("my-am@zing-password".toCharArray()), hasher, "admin_role").get() ); assertThat(v.getMessage(), containsString("names must be")); } @@ -582,7 +579,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { SecurityClient client = securityClient(); if (randomBoolean()) { - client.preparePutUser("joe", "s3krit".toCharArray(), hasher, + client.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); } else { client.preparePutRole("read_role") @@ -602,8 +599,8 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testOperationsOnReservedUsers() throws Exception { final String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(username, randomBoolean() ? 
SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() - : null, hasher, "admin").get()); + () -> securityClient().preparePutUser(username, + randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING : null, hasher, "admin").get()); assertThat(exception.getMessage(), containsString("user [" + username + "] is reserved")); exception = expectThrows(IllegalArgumentException.class, @@ -614,27 +611,25 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { () -> securityClient().prepareDeleteUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); + final char[] foobar = "foobar".toCharArray(); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareChangePassword(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray(), - hasher).get()); + () -> securityClient().prepareChangePassword(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, foobar, hasher).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray(), - hasher).get()); + () -> securityClient().preparePutUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, new SecureString(foobar), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().preparePutUser(SystemUser.NAME, "foobar".toCharArray(), hasher).get()); + () -> securityClient().preparePutUser(SystemUser.NAME, new SecureString(foobar), hasher).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareChangePassword(SystemUser.NAME, "foobar".toCharArray(), - hasher).get()); + () -> securityClient().prepareChangePassword(SystemUser.NAME, foobar, hasher).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); exception = expectThrows(IllegalArgumentException.class, - () -> securityClient().prepareDeleteUser(SystemUser.NAME).get()); + () -> securityClient().prepareDeleteUser(SystemUser.NAME).get()); assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); // get should work @@ -671,9 +666,9 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { } public void testCreateAndChangePassword() throws Exception { - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, + securityClient().preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); - final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin().cluster().prepareHealth().get(); assertThat(response.isTimedOut(), is(false)); @@ -760,8 +755,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { final int numNativeUsers = scaledRandomIntBetween(1, 32); SecurityClient securityClient = new 
SecurityClient(client()); for (int i = 0; i < numNativeUsers; i++) { - securityClient.preparePutUser("joe" + i, "s3krit".toCharArray(), hasher, - "superuser").get(); + securityClient.preparePutUser("joe" + i, new SecureString("s3krit".toCharArray()), hasher, "superuser").get(); } XPackUsageResponse response = new XPackUsageRequestBuilder(client()).get(); @@ -780,10 +774,9 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { } public void testSetEnabled() throws Exception { - - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, + securityClient().preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); - final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit")); ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin().cluster().prepareHealth().get(); assertThat(response.isTimedOut(), is(false)); @@ -806,20 +799,20 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { public void testNegativeLookupsThenCreateRole() throws Exception { SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("joe", "s3krit".toCharArray(), hasher, "unknown_role").get(); + securityClient.preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, "unknown_role").get(); final int negativeLookups = scaledRandomIntBetween(1, 10); for (int i = 0; i < negativeLookups; i++) { if (anonymousEnabled && roleExists) { ClusterHealthResponse response = client() .filterWithHeader(Collections.singletonMap("Authorization", - basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + basicAuthHeaderValue("joe", new SecureString("s3krit")))) .admin().cluster().prepareHealth().get(); assertNoTimeout(response); } else { ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() .filterWithHeader(Collections.singletonMap("Authorization", - basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + basicAuthHeaderValue("joe", new SecureString("s3krit")))) .admin().cluster().prepareHealth().get()); assertThat(e.status(), is(RestStatus.FORBIDDEN)); } @@ -828,7 +821,7 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { securityClient.preparePutRole("unknown_role").cluster("all").get(); ClusterHealthResponse response = client() .filterWithHeader(Collections.singletonMap("Authorization", - basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + basicAuthHeaderValue("joe", new SecureString("s3krit")))) .admin().cluster().prepareHealth().get(); assertNoTimeout(response); } @@ -842,10 +835,9 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { * the loader returned a null value, while the other caller(s) would get a null value unexpectedly */ public void testConcurrentRunAs() throws Exception { - securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, SecuritySettingsSource - .TEST_ROLE).get(); - securityClient().preparePutUser("executor", "s3krit".toCharArray(), hasher, "superuser").get(); - final String token = basicAuthHeaderValue("executor", new SecureString("s3krit".toCharArray())); + securityClient().preparePutUser("joe", new SecureString("s3krit".toCharArray()), hasher, SecuritySettingsSource.TEST_ROLE).get(); + securityClient().preparePutUser("executor", new 
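For context (this sketch is not part of the patch): the NativeRealmIntegTests changes above replace raw char[] passwords with org.elasticsearch.common.settings.SecureString throughout preparePutUser, prepareChangePassword and basicAuthHeaderValue. A minimal standalone illustration of what the wrapper buys, assuming only the SecureString class imported by the SecurityIndexManagerIntegTests hunk below:

    import org.elasticsearch.common.settings.SecureString;

    public class SecureStringSketch {
        public static void main(String[] args) {
            char[] chars = "s3krit".toCharArray();
            // SecureString wraps the char[]; try-with-resources closes it when done.
            try (SecureString password = new SecureString(chars)) {
                // hand `password` to preparePutUser(...) or basicAuthHeaderValue(...)
                System.out.println("password length: " + password.length());
            }
            // close() zeroes the wrapped characters, something an immutable String
            // (or an unmanaged char[]) never guarantees.
            System.out.println("first char after close: " + (int) chars[0]); // prints 0
        }
    }

The shorter new SecureString("s3krit") form seen in the basicAuthHeaderValue calls is the String-accepting convenience constructor; the char[]-based form is preferred where the caller controls the array's lifetime.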
SecureString("s3krit".toCharArray()), hasher, "superuser").get(); + final String token = basicAuthHeaderValue("executor", new SecureString("s3krit")); final Client client = client().filterWithHeader(MapBuilder.newMapBuilder() .put("Authorization", token) .put("es-security-runas-user", "joe") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java index 793e20888a5..e93950739a1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java @@ -416,7 +416,7 @@ public class SetupPasswordToolTests extends CommandTestCase { while (failCount-- > 0) { String password1 = randomAlphaOfLength(randomIntBetween(3, 10)); terminal.addSecretInput(password1); - Validation.Error err = Validation.Users.validatePassword(password1.toCharArray()); + Validation.Error err = Validation.Users.validatePassword(new SecureString(password1.toCharArray())); if (err == null) { // passes strength validation, fail by mismatch terminal.addSecretInput(password1 + "typo"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index f507edf9787..91fca881982 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -32,7 +32,7 @@ public class SecurityScrollTests extends SecurityIntegTestCase { securityClient().preparePutRole("scrollable") .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null, randomBoolean()) .get(); - securityClient().preparePutUser("other", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), getFastStoredHashAlgoForTests(), + securityClient().preparePutUser("other", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING, getFastStoredHashAlgoForTests(), "scrollable") .get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java index 55cd659509b..10a796f489c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.support; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; @@ -45,9 +46,10 @@ public class SecurityIndexManagerIntegTests extends SecurityIntegTestCase { @Override protected void doRun() throws Exception { final List requests = new ArrayList<>(numRequests); + final SecureString password = new 
SecureString("password".toCharArray()); for (int i = 0; i < numRequests; i++) { requests.add(securityClient() - .preparePutUser("user" + userNumber.getAndIncrement(), "password".toCharArray(), + .preparePutUser("user" + userNumber.getAndIncrement(), password, getFastStoredHashAlgoForTests(), randomAlphaOfLengthBetween(1, 16)) .request()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index bc17626b1f4..d8282a3e759 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -76,7 +76,7 @@ public class IpFilteringIntegrationTests extends SecurityIntegTestCase { @SuppressForbidden(reason = "Allow opening socket for test") private void trySocketConnection(Socket socket, InetSocketAddress address) throws IOException { logger.info("connecting to {}", address); - SocketAccess.doPrivileged(() -> socket.connect(address, 500)); + SocketAccess.doPrivileged(() -> socket.connect(address, 5000)); assertThat(socket.isConnected(), is(true)); try (OutputStream os = socket.getOutputStream()) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 96d8ffe455c..7c7288d6a35 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -621,46 +621,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe } public void testNextPageText() throws IOException { - int size = 20; - String[] docs = new String[size]; - for (int i = 0; i < size; i++) { - docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; - } - index(docs); - - String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; - - String cursor = null; - for (int i = 0; i < 20; i += 2) { - Tuple response; - if (i == 0) { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), "text/plain"); - } else { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", - ContentType.APPLICATION_JSON), "text/plain"); - } - - StringBuilder expected = new StringBuilder(); - if (i == 0) { - expected.append(" text | number | sum \n"); - expected.append("---------------+---------------+---------------\n"); - } - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + i, i, i + 5)); - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + (i + 1), i + 1, i + 6)); - cursor = response.v2(); - assertEquals(expected.toString(), response.v1()); - assertNotNull(cursor); - } - Map expected = new HashMap<>(); - expected.put("rows", emptyList()); - assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - StringUtils.EMPTY)); - - Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - "/close"); - assertEquals(true, response.get("succeeded")); - - assertEquals(0, 
getNumberOfSearchContexts("test")); + executeQueryWithNextPage("text/plain", " text | number | sum \n", "%-15s|%-15d|%-15d\n"); } // CSV/TSV tests @@ -702,6 +663,10 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe Tuple response = runSqlAsText(query, "text/csv; header=absent"); assertEquals(expected, response.v1()); } + + public void testNextPageCSV() throws IOException { + executeQueryWithNextPage("text/csv; header=present", "text,number,sum\r\n", "%s,%d,%d\r\n"); + } public void testQueryInTSV() throws IOException { index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", @@ -720,6 +685,55 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe response = runSqlAsTextFormat(query, "tsv"); assertEquals(expected, response.v1()); } + + public void testNextPageTSV() throws IOException { + executeQueryWithNextPage("text/tab-separated-values", "text\tnumber\tsum\n", "%s\t%d\t%d\n"); + } + + private void executeQueryWithNextPage(String format, String expectedHeader, String expectedLineFormat) throws IOException { + int size = 20; + String[] docs = new String[size]; + for (int i = 0; i < size; i++) { + docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; + } + index(docs); + + String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Tuple response; + if (i == 0) { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), format); + } else { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", + ContentType.APPLICATION_JSON), format); + } + + StringBuilder expected = new StringBuilder(); + if (i == 0) { + expected.append(expectedHeader); + if (format == "text/plain") { + expected.append("---------------+---------------+---------------\n"); + } + } + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + i, i, i + 5)); + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + (i + 1), i + 1, i + 6)); + cursor = response.v2(); + assertEquals(expected.toString(), response.v1()); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + StringUtils.EMPTY)); + + Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + "/close"); + assertEquals(true, response.get("succeeded")); + + assertEquals(0, getNumberOfSearchContexts("test")); + } private Tuple runSqlAsText(String sql, String accept) throws IOException { return runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 62963a99b2a..f4e3e006e70 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -224,7 +224,7 @@ enum TextFormat { boolean header = hasHeader(request); - if (header) { + if (header && (cursor == null || cursor == Cursor.EMPTY)) { row(sb, response.columns(), ColumnInfo::name); } diff --git 
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml
index 98bd0959179..a017da63312 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml
@@ -90,6 +90,8 @@ setup:
   - match: { transforms.0.source.index.0: "airline-data" }
   - match: { transforms.0.dest.index: "airline-data-by-airline" }
   - is_true: transforms.0.source.query.match_all
+  - is_true: transforms.0.create_time
+  - is_true: transforms.0.version
   - match: { transforms.0.pivot.group_by.airline.terms.field: "airline" }
   - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" }
   - match: { transforms.0.description: "yaml test transform on airline-data" }
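For context (this sketch is not part of the patch): the two new is_true assertions check the create_time and version metadata that the server now attaches to a stored transform. A hypothetical client-side equivalent of the same check, operating on a GET-transforms response parsed into plain maps:

    import java.util.List;
    import java.util.Map;

    final class TransformMetadataCheck {
        // Mirrors the YAML assertions: transforms.0.create_time and
        // transforms.0.version must both be present on a fetched transform.
        @SuppressWarnings("unchecked")
        static void assertCreateTimeAndVersion(Map<String, Object> getTransformsResponse) {
            List<Map<String, Object>> transforms =
                (List<Map<String, Object>>) getTransformsResponse.get("transforms");
            Map<String, Object> first = transforms.get(0);
            if (first.get("create_time") == null || first.get("version") == null) {
                throw new AssertionError("expected server-assigned create_time and version");
            }
        }
    }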