diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 110982e31e6..05e07049695 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -831,6 +831,9 @@ class BuildPlugin implements Plugin { // TODO: remove this once ctx isn't added to update script params in 7.0 systemProperty 'es.scripting.update.ctx_in_params', 'false' + //TODO: remove this once the cname is prepended to the address by default in 7.0 + systemProperty 'es.http.cname_in_publish_address', 'true' + // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM if (project.inFipsJvm) { systemProperty 'javax.net.ssl.trustStorePassword', 'password' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 914bae4d2c8..fee9a25aa35 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -16,7 +16,7 @@ slf4j = 1.6.2 jna = 4.5.1 # test dependencies -randomizedrunner = 2.5.2 +randomizedrunner = 2.7.0 junit = 4.12 httpclient = 4.5.2 # When updating httpcore, please also update server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index ecbe7f2d3a5..09c587cf81f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -28,10 +28,12 @@ import org.apache.http.entity.ByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -39,6 +41,7 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.common.Strings; @@ -180,6 +183,38 @@ final class MLRequestConverters { return request; } + static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("datafeeds") + .addPathPart(putDatafeedRequest.getDatafeed().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putDatafeedRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) throws IOException { + String endpoint = new EndpointBuilder() + 
.addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteForecastRequest.getJobId()) + .addPathPartAsIs("_forecast") + .addPathPart(Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds())) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + if (deleteForecastRequest.isAllowNoForecasts() != null) { + params.putParam("allow_no_forecasts", Boolean.toString(deleteForecastRequest.isAllowNoForecasts())); + } + if (deleteForecastRequest.timeout() != null) { + params.putParam("timeout", deleteForecastRequest.timeout().getStringRep()); + } + return request; + } + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -194,6 +229,20 @@ final class MLRequestConverters { return request; } + static Request getCategories(GetCategoriesRequest getCategoriesRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getCategoriesRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("categories") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getCategoriesRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 85c5771f345..79f9267c94d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,19 +19,20 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.ml.ForecastJobRequest; -import org.elasticsearch.client.ml.ForecastJobResponse; -import org.elasticsearch.client.ml.PostDataRequest; -import org.elasticsearch.client.ml.PostDataResponse; -import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -44,13 +45,19 @@ import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.GetRecordsResponse; import 
org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.job.stats.JobStats; import java.io.IOException; import java.util.Collections; + /** * Machine Learning API client wrapper for the {@link RestHighLevelClient} * @@ -387,6 +394,11 @@ public final class MachineLearningClient { /** * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} * + *
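+ * A minimal usage sketch; the {@code client} instance, job id, and update values
+ * below are hypothetical:
+ * <pre>{@code
+ * JobUpdate update = new JobUpdate.Builder("my-job")
+ *     .setDescription("job description updated via the HLRC")
+ *     .build();
+ * PutJobResponse response = client.machineLearning()
+ *     .updateJob(new UpdateJobRequest(update), RequestOptions.DEFAULT);
+ * }</pre>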

+ * <p>
+ * For additional info
+ * see the ML Update Job documentation
+ * </p>

+ * * @param request the {@link UpdateJobRequest} object enclosing the desired updates * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return a PutJobResponse object containing the updated job object @@ -425,6 +437,10 @@ public final class MachineLearningClient { /** * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously * + *

+ * <p>
+ * For additional info
+ * see the ML Update Job documentation
+ * </p>

* @param request the {@link UpdateJobRequest} object enclosing the desired updates * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion @@ -438,6 +454,86 @@ public final class MachineLearningClient { Collections.emptySet()); } + /** + * Creates a new Machine Learning Datafeed + *

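+ * A minimal usage sketch; the {@code client} instance, ids, and index name are
+ * hypothetical, and the query is passed as raw JSON via the new String setter:
+ * <pre>{@code
+ * DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
+ *     .setIndices("my-index")
+ *     .setQuery("{\"match_all\": {}}")
+ *     .build();
+ * PutDatafeedResponse response = client.machineLearning()
+ *     .putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
+ * }</pre>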
+ * For additional info + * see ML PUT datafeed documentation + * + * @param request The PutDatafeedRequest containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutDatafeedResponse with enclosed {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutDatafeedResponse putDatafeed(PutDatafeedRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putDatafeed, + options, + PutDatafeedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates a new Machine Learning Datafeed asynchronously and notifies listener on completion + *
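+ * A sketch of the async calling convention; the {@code client} and {@code request}
+ * variables are assumed to exist already:
+ * <pre>{@code
+ * client.machineLearning().putDatafeedAsync(request, RequestOptions.DEFAULT,
+ *     new ActionListener<PutDatafeedResponse>() {
+ *         @Override
+ *         public void onResponse(PutDatafeedResponse response) {
+ *             // react to the created datafeed configuration
+ *         }
+ *
+ *         @Override
+ *         public void onFailure(Exception e) {
+ *             // react to the failure
+ *         }
+ *     });
+ * }</pre>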

+ * <p>
+ * For additional info
+ * see the ML PUT datafeed documentation
+ * </p>
+ *
+ * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings
+ * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @param listener Listener to be notified upon request completion
+ */
+ public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener<PutDatafeedResponse> listener) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request,
+ MLRequestConverters::putDatafeed,
+ options,
+ PutDatafeedResponse::fromXContent,
+ listener,
+ Collections.emptySet());
+ }
+
+ /**
+ * Deletes Machine Learning Job Forecasts
+ *
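+ * A minimal usage sketch; the job and forecast ids are hypothetical:
+ * <pre>{@code
+ * DeleteForecastRequest request = new DeleteForecastRequest("my-job");
+ * request.setForecastIds("forecast-1", "forecast-2");
+ * request.timeout("30s");
+ * AcknowledgedResponse response = client.machineLearning()
+ *     .deleteForecast(request, RequestOptions.DEFAULT);
+ * // DeleteForecastRequest.deleteAllForecasts("my-job") explicitly targets all forecasts
+ * }</pre>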

+ * <p>
+ * For additional info
+ * see the ML Delete Forecast documentation
+ * </p>

+ *
+ * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecastIds, and other options
+ * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @return an {@link AcknowledgedResponse} object indicating request success
+ * @throws IOException when there is a serialization issue sending the request or receiving the response
+ */
+ public AcknowledgedResponse deleteForecast(DeleteForecastRequest request, RequestOptions options) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(request,
+ MLRequestConverters::deleteForecast,
+ options,
+ AcknowledgedResponse::fromXContent,
+ Collections.emptySet());
+ }
+
+ /**
+ * Deletes Machine Learning Job Forecasts asynchronously
+ *

+ * <p>
+ * For additional info
+ * see the ML Delete Forecast documentation
+ * </p>

+ *
+ * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecastIds, and other options
+ * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @param listener Listener to be notified upon request completion
+ */
+ public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request,
+ MLRequestConverters::deleteForecast,
+ options,
+ AcknowledgedResponse::fromXContent,
+ listener,
+ Collections.emptySet());
+ }
+
 /**
 * Gets the buckets for a Machine Learning Job.
 *

@@ -474,6 +570,45 @@ public final class MachineLearningClient { Collections.emptySet()); } + /** + * Gets the categories for a Machine Learning Job. + *
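+ * A minimal usage sketch; the job id and paging values are hypothetical:
+ * <pre>{@code
+ * GetCategoriesRequest request = new GetCategoriesRequest("my-job");
+ * request.setPageParams(new PageParams(0, 100));
+ * GetCategoriesResponse response = client.machineLearning()
+ *     .getCategories(request, RequestOptions.DEFAULT);
+ * long totalCount = response.count();
+ * }</pre>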

+ * <p>
+ * For additional info
+ * see the ML GET categories documentation
+ * </p>
+ *
+ * @param request The request
+ * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @return {@link GetCategoriesResponse} object containing the requested categories
+ * @throws IOException when there is a serialization issue sending the request or receiving the response
+ */
+ public GetCategoriesResponse getCategories(GetCategoriesRequest request, RequestOptions options) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(request,
+ MLRequestConverters::getCategories,
+ options,
+ GetCategoriesResponse::fromXContent,
+ Collections.emptySet());
+ }
+
+ /**
+ * Gets the categories for a Machine Learning Job, notifies the listener once the requested categories are retrieved.
+ *

+ * <p>
+ * For additional info
+ * see the ML GET categories documentation
+ * </p>
+ *
+ * @param request The request
+ * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @param listener Listener to be notified upon request completion
+ */
+ public void getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, ActionListener<GetCategoriesResponse> listener) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request,
+ MLRequestConverters::getCategories,
+ options,
+ GetCategoriesResponse::fromXContent,
+ listener,
+ Collections.emptySet());
+ }
+
 /**
 * Gets overall buckets for a set of Machine Learning Jobs.
 *

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java new file mode 100644 index 00000000000..f7c8a6c0733 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * POJO for a delete forecast request + */ +public class DeleteForecastRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField FORECAST_ID = new ParseField("forecast_id"); + public static final ParseField ALLOW_NO_FORECASTS = new ParseField("allow_no_forecasts"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final String ALL = "_all"; + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("delete_forecast_request", (a) -> new DeleteForecastRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareStringOrNull( + (c, p) -> c.setForecastIds(Strings.commaDelimitedListToStringArray(p)), FORECAST_ID); + PARSER.declareBoolean(DeleteForecastRequest::setAllowNoForecasts, ALLOW_NO_FORECASTS); + PARSER.declareString(DeleteForecastRequest::timeout, TIMEOUT); + } + + /** + * Create a new {@link DeleteForecastRequest} that explicitly deletes all forecasts + * + * @param jobId the jobId of the Job whose forecasts to delete + */ + public static DeleteForecastRequest deleteAllForecasts(String jobId) { + DeleteForecastRequest request = new DeleteForecastRequest(jobId); + request.setForecastIds(ALL); + return request; + } + + private final String jobId; + private List forecastIds = new ArrayList<>(); + private Boolean allowNoForecasts; + private TimeValue timeout; + + /** + * Create a new DeleteForecastRequest for the given Job ID + * + * @param jobId the jobId of the Job whose forecast(s) to delete + */ + public DeleteForecastRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId, 
Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public List getForecastIds() { + return forecastIds; + } + + /** + * The forecast IDs to delete. Can be also be {@link DeleteForecastRequest#ALL} to explicitly delete ALL forecasts + * + * @param forecastIds forecast IDs to delete + */ + public void setForecastIds(String... forecastIds) { + setForecastIds(Arrays.asList(forecastIds)); + } + + void setForecastIds(List forecastIds) { + if (forecastIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("forecastIds must not contain null values"); + } + this.forecastIds = new ArrayList<>(forecastIds); + } + + public Boolean isAllowNoForecasts() { + return allowNoForecasts; + } + + /** + * Sets the `allow_no_forecasts` field. + * + * @param allowNoForecasts when {@code true} no error is thrown when {@link DeleteForecastRequest#ALL} does not find any forecasts + */ + public void setAllowNoForecasts(boolean allowNoForecasts) { + this.allowNoForecasts = allowNoForecasts; + } + + /** + * Allows to set the timeout + * @param timeout timeout as a string (e.g. 1s) + */ + public void timeout(String timeout) { + this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout"); + } + + /** + * Allows to set the timeout + * @param timeout timeout as a {@link TimeValue} + */ + public void timeout(TimeValue timeout) { + this.timeout = timeout; + } + + public TimeValue timeout() { + return timeout; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DeleteForecastRequest that = (DeleteForecastRequest) other; + return Objects.equals(jobId, that.jobId) && + Objects.equals(forecastIds, that.forecastIds) && + Objects.equals(allowNoForecasts, that.allowNoForecasts) && + Objects.equals(timeout, that.timeout); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, forecastIds, allowNoForecasts, timeout); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (forecastIds != null) { + builder.field(FORECAST_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(forecastIds)); + } + if (allowNoForecasts != null) { + builder.field(ALLOW_NO_FORECASTS.getPreferredName(), allowNoForecasts); + } + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java new file mode 100644 index 00000000000..4fc68793f00 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve categories of a given job + */ +public class GetCategoriesRequest extends ActionRequest implements ToXContentObject { + + + public static final ParseField CATEGORY_ID = new ParseField("category_id"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_categories_request", a -> new GetCategoriesRequest((String) a[0])); + + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareLong(GetCategoriesRequest::setCategoryId, CATEGORY_ID); + PARSER.declareObject(GetCategoriesRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + private final String jobId; + private Long categoryId; + private PageParams pageParams; + + /** + * Constructs a request to retrieve category information from a given job + * @param jobId id of the job from which to retrieve results + */ + public GetCategoriesRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public Long getCategoryId() { + return categoryId; + } + + /** + * Sets the category id + * @param categoryId the category id + */ + public void setCategoryId(Long categoryId) { + this.categoryId = categoryId; + } + + /** + * Sets the paging parameters + * @param pageParams the paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (categoryId != null) { + builder.field(CATEGORY_ID.getPreferredName(), categoryId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetCategoriesRequest request = (GetCategoriesRequest) obj; + return Objects.equals(jobId, request.jobId) + && Objects.equals(categoryId, request.categoryId) + && Objects.equals(pageParams, request.pageParams); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, categoryId, pageParams); + } +} diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java new file mode 100644 index 00000000000..3d3abe00bfb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.CategoryDefinition; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested categories + */ +public class GetCategoriesResponse extends AbstractResultResponse { + + public static final ParseField CATEGORIES = new ParseField("categories"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("get_categories_response", true, + a -> new GetCategoriesResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CategoryDefinition.PARSER, CATEGORIES); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetCategoriesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetCategoriesResponse(List categories, long count) { + super(CATEGORIES, categories, count); + } + + /** + * The retrieved categories + * @return the retrieved categories + */ + public List categories() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetCategoriesResponse other = (GetCategoriesResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java new file mode 100644 index 00000000000..34cb12599a6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to create a new Machine Learning Datafeed given a {@link DatafeedConfig} configuration + */ +public class PutDatafeedRequest extends ActionRequest implements ToXContentObject { + + private final DatafeedConfig datafeed; + + /** + * Construct a new PutDatafeedRequest + * + * @param datafeed a {@link DatafeedConfig} configuration to create + */ + public PutDatafeedRequest(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public DatafeedConfig getDatafeed() { + return datafeed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return datafeed.toXContent(builder, params); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PutDatafeedRequest request = (PutDatafeedRequest) object; + return Objects.equals(datafeed, request.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java new file mode 100644 index 00000000000..fa9862fd3b9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response containing the newly created {@link DatafeedConfig} + */ +public class PutDatafeedResponse implements ToXContentObject { + + private DatafeedConfig datafeed; + + public static PutDatafeedResponse fromXContent(XContentParser parser) throws IOException { + return new PutDatafeedResponse(DatafeedConfig.PARSER.parse(parser, null).build()); + } + + PutDatafeedResponse(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public DatafeedConfig getResponse() { + return datafeed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + datafeed.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + PutDatafeedResponse response = (PutDatafeedResponse) object; + return Objects.equals(datafeed, response.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index 752752b1038..84deae61f8e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -20,36 +20,37 @@ package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; import java.util.Objects; /** - * Datafeed configuration options pojo. Describes where to proactively pull input - * data from. - *
- * <p>
- * If a value has not been set it will be null. Object wrappers are - * used around integral types and booleans so they can take null - * values. + * The datafeed configuration object. It specifies which indices + * to get the data from and offers parameters for customizing different + * aspects of the process. */ public class DatafeedConfig implements ToXContentObject { - public static final int DEFAULT_SCROLL_SIZE = 1000; - public static final ParseField ID = new ParseField("datafeed_id"); public static final ParseField QUERY_DELAY = new ParseField("query_delay"); public static final ParseField FREQUENCY = new ParseField("frequency"); @@ -59,7 +60,6 @@ public class DatafeedConfig implements ToXContentObject { public static final ParseField QUERY = new ParseField("query"); public static final ParseField SCROLL_SIZE = new ParseField("scroll_size"); public static final ParseField AGGREGATIONS = new ParseField("aggregations"); - public static final ParseField AGGS = new ParseField("aggs"); public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); @@ -77,9 +77,8 @@ public class DatafeedConfig implements ToXContentObject { builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); - PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); + PARSER.declareField(Builder::setQuery, DatafeedConfig::parseBytes, QUERY, ObjectParser.ValueType.OBJECT); + PARSER.declareField(Builder::setAggregations, DatafeedConfig::parseBytes, AGGREGATIONS, ObjectParser.ValueType.OBJECT); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -91,29 +90,26 @@ public class DatafeedConfig implements ToXContentObject { PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG); } + private static BytesReference parseBytes(XContentParser parser) throws IOException { + XContentBuilder contentBuilder = JsonXContent.contentBuilder(); + contentBuilder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(contentBuilder); + } + private final String id; private final String jobId; - - /** - * The delay before starting to query a period of time - */ private final TimeValue queryDelay; - - /** - * The frequency with which queries are executed - */ private final TimeValue frequency; - private final List indices; private final List types; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final BytesReference query; + private final BytesReference aggregations; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, - QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, ChunkingConfig 
chunkingConfig) { this.id = id; this.jobId = jobId; @@ -156,11 +152,11 @@ public class DatafeedConfig implements ToXContentObject { return scrollSize; } - public QueryBuilder getQuery() { + public BytesReference getQuery() { return query; } - public AggregatorFactories.Builder getAggregations() { + public BytesReference getAggregations() { return aggregations; } @@ -183,11 +179,17 @@ public class DatafeedConfig implements ToXContentObject { if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } - builder.field(INDICES.getPreferredName(), indices); - builder.field(TYPES.getPreferredName(), types); - builder.field(QUERY.getPreferredName(), query); + if (indices != null) { + builder.field(INDICES.getPreferredName(), indices); + } + if (types != null) { + builder.field(TYPES.getPreferredName(), types); + } + if (query != null) { + builder.field(QUERY.getPreferredName(), asMap(query)); + } if (aggregations != null) { - builder.field(AGGREGATIONS.getPreferredName(), aggregations); + builder.field(AGGREGATIONS.getPreferredName(), asMap(aggregations)); } if (scriptFields != null) { builder.startObject(SCRIPT_FIELDS.getPreferredName()); @@ -196,7 +198,9 @@ public class DatafeedConfig implements ToXContentObject { } builder.endObject(); } - builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + if (scrollSize != null) { + builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + } if (chunkingConfig != null) { builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); } @@ -205,10 +209,18 @@ public class DatafeedConfig implements ToXContentObject { return builder; } + private static Map asMap(BytesReference bytesReference) { + return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); + } + /** * The lists of indices and types are compared for equality but they are not * sorted first so this test could fail simply because the indices and types * lists are in different orders. + * + * Also note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to correctly + * compare them. */ @Override public boolean equals(Object other) { @@ -228,31 +240,40 @@ public class DatafeedConfig implements ToXContentObject { && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) && Objects.equals(this.types, that.types) - && Objects.equals(this.query, that.query) + && Objects.equals(asMap(this.query), asMap(that.query)) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } + /** + * Note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to + * compute a stable hash code. 
+ */ @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, chunkingConfig); } + public static Builder builder(String id, String jobId) { + return new Builder(id, jobId); + } + public static class Builder { private String id; private String jobId; private TimeValue queryDelay; private TimeValue frequency; - private List indices = Collections.emptyList(); - private List types = Collections.emptyList(); - private QueryBuilder query = QueryBuilders.matchAllQuery(); - private AggregatorFactories.Builder aggregations; + private List indices; + private List types; + private BytesReference query; + private BytesReference aggregations; private List scriptFields; - private Integer scrollSize = DEFAULT_SCROLL_SIZE; + private Integer scrollSize; private ChunkingConfig chunkingConfig; public Builder(String id, String jobId) { @@ -279,8 +300,12 @@ public class DatafeedConfig implements ToXContentObject { return this; } + public Builder setIndices(String... indices) { + return setIndices(Arrays.asList(indices)); + } + public Builder setTypes(List types) { - this.types = Objects.requireNonNull(types, TYPES.getPreferredName()); + this.types = types; return this; } @@ -294,16 +319,36 @@ public class DatafeedConfig implements ToXContentObject { return this; } - public Builder setQuery(QueryBuilder query) { - this.query = Objects.requireNonNull(query, QUERY.getPreferredName()); + private Builder setQuery(BytesReference query) { + this.query = query; return this; } - public Builder setAggregations(AggregatorFactories.Builder aggregations) { + public Builder setQuery(String queryAsJson) { + this.query = queryAsJson == null ? null : new BytesArray(queryAsJson); + return this; + } + + public Builder setQuery(QueryBuilder query) throws IOException { + this.query = query == null ? null : xContentToBytes(query); + return this; + } + + private Builder setAggregations(BytesReference aggregations) { this.aggregations = aggregations; return this; } + public Builder setAggregations(String aggsAsJson) { + this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson); + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException { + this.aggregations = aggregations == null ? 
null : xContentToBytes(aggregations); + return this; + } + public Builder setScriptFields(List scriptFields) { List sorted = new ArrayList<>(scriptFields); sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); @@ -325,5 +370,12 @@ public class DatafeedConfig implements ToXContentObject { return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, chunkingConfig); } + + private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + object.toXContent(builder, ToXContentObject.EMPTY_PARAMS); + return BytesReference.bytes(builder); + } + } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 184d5d51481..1e59ea067ca 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -20,12 +20,17 @@ package org.elasticsearch.client.ml.datafeed; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -35,6 +40,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -58,11 +64,9 @@ public class DatafeedUpdate implements ToXContentObject { TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency( TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); - PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGS); + PARSER.declareField(Builder::setQuery, DatafeedUpdate::parseBytes, DatafeedConfig.QUERY, ObjectParser.ValueType.OBJECT); + PARSER.declareField(Builder::setAggregations, DatafeedUpdate::parseBytes, DatafeedConfig.AGGREGATIONS, + ObjectParser.ValueType.OBJECT); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -74,20 +78,26 @@ 
public class DatafeedUpdate implements ToXContentObject { PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG); } + private static BytesReference parseBytes(XContentParser parser) throws IOException { + XContentBuilder contentBuilder = JsonXContent.contentBuilder(); + contentBuilder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(contentBuilder); + } + private final String id; private final String jobId; private final TimeValue queryDelay; private final TimeValue frequency; private final List indices; private final List types; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final BytesReference query; + private final BytesReference aggregations; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, - QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, ChunkingConfig chunkingConfig) { this.id = id; this.jobId = jobId; @@ -121,9 +131,13 @@ public class DatafeedUpdate implements ToXContentObject { builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); } addOptionalField(builder, DatafeedConfig.INDICES, indices); + if (query != null) { + builder.field(DatafeedConfig.QUERY.getPreferredName(), asMap(query)); + } + if (aggregations != null) { + builder.field(DatafeedConfig.AGGREGATIONS.getPreferredName(), asMap(aggregations)); + } addOptionalField(builder, DatafeedConfig.TYPES, types); - addOptionalField(builder, DatafeedConfig.QUERY, query); - addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations); if (scriptFields != null) { builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { @@ -167,11 +181,11 @@ public class DatafeedUpdate implements ToXContentObject { return scrollSize; } - public QueryBuilder getQuery() { + public BytesReference getQuery() { return query; } - public AggregatorFactories.Builder getAggregations() { + public BytesReference getAggregations() { return aggregations; } @@ -183,10 +197,18 @@ public class DatafeedUpdate implements ToXContentObject { return chunkingConfig; } + private static Map asMap(BytesReference bytesReference) { + return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); + } + /** * The lists of indices and types are compared for equality but they are not * sorted first so this test could fail simply because the indices and types * lists are in different orders. + * + * Also note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to correctly + * compare them. 
*/ @Override public boolean equals(Object other) { @@ -206,19 +228,28 @@ public class DatafeedUpdate implements ToXContentObject { && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) && Objects.equals(this.types, that.types) - && Objects.equals(this.query, that.query) + && Objects.equals(asMap(this.query), asMap(that.query)) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } + /** + * Note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to + * compute a stable hash code. + */ @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, chunkingConfig); } + public static Builder builder(String id) { + return new Builder(id); + } + public static class Builder { private String id; @@ -227,8 +258,8 @@ public class DatafeedUpdate implements ToXContentObject { private TimeValue frequency; private List indices; private List types; - private QueryBuilder query; - private AggregatorFactories.Builder aggregations; + private BytesReference query; + private BytesReference aggregations; private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; @@ -276,16 +307,36 @@ public class DatafeedUpdate implements ToXContentObject { return this; } - public Builder setQuery(QueryBuilder query) { + private Builder setQuery(BytesReference query) { this.query = query; return this; } - public Builder setAggregations(AggregatorFactories.Builder aggregations) { + public Builder setQuery(String queryAsJson) { + this.query = queryAsJson == null ? null : new BytesArray(queryAsJson); + return this; + } + + public Builder setQuery(QueryBuilder query) throws IOException { + this.query = query == null ? null : xContentToBytes(query); + return this; + } + + private Builder setAggregations(BytesReference aggregations) { this.aggregations = aggregations; return this; } + public Builder setAggregations(String aggsAsJson) { + this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson); + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException { + this.aggregations = aggregations == null ? 
null : xContentToBytes(aggregations); + return this; + } + public Builder setScriptFields(List scriptFields) { List sorted = new ArrayList<>(scriptFields); sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); @@ -307,5 +358,12 @@ public class DatafeedUpdate implements ToXContentObject { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, chunkingConfig); } + + private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + object.toXContent(builder, ToXContentObject.EMPTY_PARAMS); + return BytesReference.bytes(builder); + } + } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 26e6251af48..19db672e35b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -24,10 +24,12 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -35,14 +37,18 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.JobUpdateTests; import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -203,6 +209,47 @@ public class MLRequestConvertersTests extends ESTestCase { } } + public void testPutDatafeed() throws IOException { + DatafeedConfig datafeed = DatafeedConfigTests.createRandom(); + PutDatafeedRequest putDatafeedRequest = new PutDatafeedRequest(datafeed); + + Request request = MLRequestConverters.putDatafeed(putDatafeedRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/datafeeds/" + datafeed.getId())); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + 
DatafeedConfig parsedDatafeed = DatafeedConfig.PARSER.apply(parser, null).build(); + assertThat(parsedDatafeed, equalTo(datafeed)); + } + } + + public void testDeleteForecast() throws Exception { + String jobId = randomAlphaOfLength(10); + DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId); + + Request request = MLRequestConverters.deleteForecast(deleteForecastRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_forecast", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("timeout")); + assertFalse(request.getParameters().containsKey("allow_no_forecasts")); + + deleteForecastRequest.setForecastIds(randomAlphaOfLength(10), randomAlphaOfLength(10)); + deleteForecastRequest.timeout("10s"); + deleteForecastRequest.setAllowNoForecasts(true); + + request = MLRequestConverters.deleteForecast(deleteForecastRequest); + assertEquals( + "/_xpack/ml/anomaly_detectors/" + + jobId + + "/_forecast/" + + Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()), + request.getEndpoint()); + assertEquals("10s", + request.getParameters().get(DeleteForecastRequest.TIMEOUT.getPreferredName())); + assertEquals(Boolean.toString(true), + request.getParameters().get(DeleteForecastRequest.ALLOW_NO_FORECASTS.getPreferredName())); + } + public void testGetBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); @@ -220,6 +267,21 @@ public class MLRequestConvertersTests extends ESTestCase { } } + public void testGetCategories() throws IOException { + String jobId = randomAlphaOfLength(10); + GetCategoriesRequest getCategoriesRequest = new GetCategoriesRequest(jobId); + getCategoriesRequest.setPageParams(new PageParams(100, 300)); + + + Request request = MLRequestConverters.getCategories(getCategoriesRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/categories", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetCategoriesRequest parsedRequest = GetCategoriesRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getCategoriesRequest)); + } + } + public void testGetOverallBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 40d8596d1ba..ddaec641573 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetOverallBucketsRequest; @@ -126,11 +128,150 @@ 
public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase { bulkRequest.add(indexRequest); } + private void addCategoryIndexRequest(long categoryId, String categoryName, BulkRequest bulkRequest) { + IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC); + indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"category_id\": " + categoryId + ", \"terms\": \"" + + categoryName + "\", \"regex\": \".*?" + categoryName + ".*\", \"max_matching_length\": 3, \"examples\": [\"" + + categoryName + "\"]}", XContentType.JSON); + bulkRequest.add(indexRequest); + } + + private void addCategoriesIndexRequests(BulkRequest bulkRequest) { + + List<String> categories = Arrays.asList("AAL", "JZA", "JBU"); + + for (int i = 0; i < categories.size(); i++) { + addCategoryIndexRequest(i + 1, categories.get(i), bulkRequest); + } + } + @After public void deleteJob() throws IOException { new MlRestTestStateCleaner(logger, client()).clearMlMetadata(); } + public void testGetCategories() throws IOException { + + // index some category results + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + addCategoriesIndexRequests(bulkRequest); + + highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setPageParams(new PageParams(0, 10000)); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(3L)); + assertThat(response.categories().size(), equalTo(3)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(1L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("AAL")); + + assertThat(response.categories().get(1).getCategoryId(), equalTo(2L)); + assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(1).getRegex(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(1).getTerms(), equalTo("JZA")); + + assertThat(response.categories().get(2).getCategoryId(), equalTo(3L)); + assertThat(response.categories().get(2).getGrokPattern(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(2).getRegex(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(2).getTerms(), equalTo("JBU")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setPageParams(new PageParams(0, 1)); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(3L)); + assertThat(response.categories().size(), equalTo(1)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(1L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("AAL")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setPageParams(new PageParams(1, 2)); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, +
machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(3L)); + assertThat(response.categories().size(), equalTo(2)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(2L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JZA")); + + assertThat(response.categories().get(1).getCategoryId(), equalTo(3L)); + assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(1).getRegex(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(1).getTerms(), equalTo("JBU")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(0L); // request a non-existent category + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(0L)); + assertThat(response.categories().size(), equalTo(0)); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(1L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().size(), equalTo(1)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(1L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("AAL")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(2L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(2L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JZA")); + + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(3L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(3L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JBU")); + } + } + public void testGetBuckets() throws IOException { MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index fb715683b27..c0bf1055058 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -20,33 +20,40 @@ package 
org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.client.ml.ForecastJobRequest; -import org.elasticsearch.client.ml.ForecastJobResponse; -import org.elasticsearch.client.ml.PostDataRequest; -import org.elasticsearch.client.ml.PostDataResponse; -import org.elasticsearch.client.ml.UpdateJobRequest; -import org.elasticsearch.client.ml.job.config.JobUpdate; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.client.ml.GetJobStatsRequest; -import org.elasticsearch.client.ml.GetJobStatsResponse; -import org.elasticsearch.client.ml.job.config.JobState; -import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.FlushJobRequest; -import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.common.unit.TimeValue; import org.junit.After; import java.io.IOException; @@ -288,6 +295,92 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertEquals("Updated description", getResponse.jobs().get(0).getDescription()); } + public void testPutDatafeed() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + execute(new PutJobRequest(job), machineLearningClient::putJob, machineLearningClient::putJobAsync); + + String datafeedId = "datafeed-" + jobId; + DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build(); + + PutDatafeedResponse response = execute(new PutDatafeedRequest(datafeedConfig), machineLearningClient::putDatafeed, + 
machineLearningClient::putDatafeedAsync); + + DatafeedConfig createdDatafeed = response.getResponse(); + assertThat(createdDatafeed.getId(), equalTo(datafeedId)); + assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices())); + } + + public void testDeleteForecast() throws Exception { + String jobId = "test-delete-forecast"; + + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + Job noForecastsJob = buildJob("test-delete-forecast-none"); + machineLearningClient.putJob(new PutJobRequest(noForecastsJob), RequestOptions.DEFAULT); + + PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder(); + for (int i = 0; i < 30; i++) { + Map<String, Object> hashMap = new HashMap<>(); + hashMap.put("total", randomInt(1000)); + hashMap.put("timestamp", (i + 1) * 1000); + builder.addDoc(hashMap); + } + + PostDataRequest postDataRequest = new PostDataRequest(jobId, builder); + machineLearningClient.postData(postDataRequest, RequestOptions.DEFAULT); + machineLearningClient.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT); + ForecastJobResponse forecastJobResponse1 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT); + ForecastJobResponse forecastJobResponse2 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT); + waitForForecastToComplete(jobId, forecastJobResponse1.getForecastId()); + waitForForecastToComplete(jobId, forecastJobResponse2.getForecastId()); + + { + DeleteForecastRequest request = new DeleteForecastRequest(jobId); + request.setForecastIds(forecastJobResponse1.getForecastId(), forecastJobResponse2.getForecastId()); + AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast, + machineLearningClient::deleteForecastAsync); + assertTrue(response.isAcknowledged()); + assertFalse(forecastExists(jobId, forecastJobResponse1.getForecastId())); + assertFalse(forecastExists(jobId, forecastJobResponse2.getForecastId())); + } + { + DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId()); + request.setAllowNoForecasts(true); + AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast, + machineLearningClient::deleteForecastAsync); + assertTrue(response.isAcknowledged()); + } + { + DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId()); + request.setAllowNoForecasts(false); + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(request, machineLearningClient::deleteForecast, machineLearningClient::deleteForecastAsync)); + assertThat(exception.status().getStatus(), equalTo(404)); + } + } + + private void waitForForecastToComplete(String jobId, String forecastId) throws Exception { + GetRequest request = new GetRequest(".ml-anomalies-" + jobId); + request.id(jobId + "_model_forecast_request_stats_" + forecastId); + assertBusy(() -> { + GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT); + assertTrue(getResponse.isExists()); + assertTrue(getResponse.getSourceAsString().contains("finished")); + }, 30, TimeUnit.SECONDS); + } + + private boolean forecastExists(String jobId, String forecastId) throws Exception { + GetRequest getRequest = new GetRequest(".ml-anomalies-" +
jobId); + getRequest.id(jobId + "_model_forecast_request_stats_" + forecastId); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); + return getResponse.isExists(); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 9abef54d0d2..3e43792ac6a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -21,8 +21,11 @@ package org.elasticsearch.client.documentation; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.MachineLearningGetResultsIT; import org.elasticsearch.client.MachineLearningIT; @@ -31,6 +34,7 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.FlushJobRequest; @@ -39,6 +43,8 @@ import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -53,28 +59,36 @@ import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.ChunkingConfig; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.AnalysisLimits; import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.DetectionRule; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; -import 
org.elasticsearch.client.ml.job.process.DataCounts; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.ModelPlotConfig; import org.elasticsearch.client.ml.job.config.Operator; import org.elasticsearch.client.ml.job.config.RuleCondition; +import org.elasticsearch.client.ml.job.process.DataCounts; import org.elasticsearch.client.ml.job.results.AnomalyRecord; import org.elasticsearch.client.ml.job.results.Bucket; +import org.elasticsearch.client.ml.job.results.CategoryDefinition; import org.elasticsearch.client.ml.job.results.Influencer; import org.elasticsearch.client.ml.job.results.OverallBucket; import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.junit.After; import java.io.IOException; @@ -90,6 +104,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.core.Is.is; @@ -182,8 +197,6 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { public void testGetJob() throws Exception { RestHighLevelClient client = highLevelClient(); - String jobId = "get-machine-learning-job1"; - Job job = MachineLearningIT.buildJob("get-machine-learning-job1"); client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); @@ -473,7 +486,107 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } - + + public void testPutDatafeed() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // We need to create a job for the datafeed request to be valid + String jobId = "put-datafeed-job-1"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + String id = "datafeed-1"; + + //tag::x-pack-ml-create-datafeed-config + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder(id, jobId) // <1> + .setIndices("index_1", "index_2"); // <2> + //end::x-pack-ml-create-datafeed-config + + AggregatorFactories.Builder aggs = AggregatorFactories.builder(); + + //tag::x-pack-ml-create-datafeed-config-set-aggregations + datafeedBuilder.setAggregations(aggs); // <1> + //end::x-pack-ml-create-datafeed-config-set-aggregations + + // Clearing aggregation to avoid complex validation rules + datafeedBuilder.setAggregations((String) null); + + //tag::x-pack-ml-create-datafeed-config-set-chunking-config + datafeedBuilder.setChunkingConfig(ChunkingConfig.newAuto()); // <1> + //end::x-pack-ml-create-datafeed-config-set-chunking-config + + //tag::x-pack-ml-create-datafeed-config-set-frequency + datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(30)); // <1> + //end::x-pack-ml-create-datafeed-config-set-frequency + + //tag::x-pack-ml-create-datafeed-config-set-query + datafeedBuilder.setQuery(QueryBuilders.matchAllQuery()); // <1> + //end::x-pack-ml-create-datafeed-config-set-query + + //tag::x-pack-ml-create-datafeed-config-set-query-delay + 
datafeedBuilder.setQueryDelay(TimeValue.timeValueMinutes(1)); // <1> + //end::x-pack-ml-create-datafeed-config-set-query-delay + + List<SearchSourceBuilder.ScriptField> scriptFields = Collections.emptyList(); + //tag::x-pack-ml-create-datafeed-config-set-script-fields + datafeedBuilder.setScriptFields(scriptFields); // <1> + //end::x-pack-ml-create-datafeed-config-set-script-fields + + //tag::x-pack-ml-create-datafeed-config-set-scroll-size + datafeedBuilder.setScrollSize(1000); // <1> + //end::x-pack-ml-create-datafeed-config-set-scroll-size + + //tag::x-pack-ml-put-datafeed-request + PutDatafeedRequest request = new PutDatafeedRequest(datafeedBuilder.build()); // <1> + //end::x-pack-ml-put-datafeed-request + + //tag::x-pack-ml-put-datafeed-execute + PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT); + //end::x-pack-ml-put-datafeed-execute + + //tag::x-pack-ml-put-datafeed-response + DatafeedConfig datafeed = response.getResponse(); // <1> + //end::x-pack-ml-put-datafeed-response + assertThat(datafeed.getId(), equalTo("datafeed-1")); + } + { + // We need to create a job for the datafeed request to be valid + String jobId = "put-datafeed-job-2"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + String id = "datafeed-2"; + + DatafeedConfig datafeed = new DatafeedConfig.Builder(id, jobId).setIndices("index_1", "index_2").build(); + + PutDatafeedRequest request = new PutDatafeedRequest(datafeed); + // tag::x-pack-ml-put-datafeed-execute-listener + ActionListener<PutDatafeedResponse> listener = new ActionListener<PutDatafeedResponse>() { + @Override + public void onResponse(PutDatafeedResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-put-datafeed-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-put-datafeed-execute-async + client.machineLearning().putDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-put-datafeed-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetBuckets() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); @@ -636,8 +749,85 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testDeleteForecast() throws Exception { + RestHighLevelClient client = highLevelClient(); + Job job = MachineLearningIT.buildJob("deleting-forecast-for-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder(); + for (int i = 0; i < 30; i++) { + Map<String, Object> hashMap = new HashMap<>(); + hashMap.put("total", randomInt(1000)); + hashMap.put("timestamp", (i + 1) * 1000); + builder.addDoc(hashMap); + } + PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder); + client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT); + client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT); + ForecastJobResponse forecastJobResponse = client.machineLearning().
+ forecastJob(new ForecastJobRequest(job.getId()), RequestOptions.DEFAULT); + String forecastId = forecastJobResponse.getForecastId(); + + GetRequest request = new GetRequest(".ml-anomalies-" + job.getId()); + request.id(job.getId() + "_model_forecast_request_stats_" + forecastId); + assertBusy(() -> { + GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT); + assertTrue(getResponse.isExists()); + assertTrue(getResponse.getSourceAsString().contains("finished")); + }, 30, TimeUnit.SECONDS); + + { + //tag::x-pack-ml-delete-forecast-request + DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); //<1> + //end::x-pack-ml-delete-forecast-request + + //tag::x-pack-ml-delete-forecast-request-options + deleteForecastRequest.setForecastIds(forecastId); //<1> + deleteForecastRequest.timeout("30s"); //<2> + deleteForecastRequest.setAllowNoForecasts(true); //<3> + //end::x-pack-ml-delete-forecast-request-options + + //tag::x-pack-ml-delete-forecast-execute + AcknowledgedResponse deleteForecastResponse = client.machineLearning().deleteForecast(deleteForecastRequest, + RequestOptions.DEFAULT); + //end::x-pack-ml-delete-forecast-execute + + //tag::x-pack-ml-delete-forecast-response + boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); //<1> + //end::x-pack-ml-delete-forecast-response + } + { + //tag::x-pack-ml-delete-forecast-listener + ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse deleteForecastResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-delete-forecast-listener + DeleteForecastRequest deleteForecastRequest = DeleteForecastRequest.deleteAllForecasts(job.getId()); + deleteForecastRequest.setAllowNoForecasts(true); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-delete-forecast-execute-async + client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-delete-forecast-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetJobStats() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -1111,4 +1301,74 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testGetCategories() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-get-categories"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a category + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\": \"test-get-categories\", \"category_id\": 1, \"terms\": \"AAL\"," + + " \"regex\": \".*?AAL.*\", \"max_matching_length\": 3, \"examples\": [\"AAL\"]}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::x-pack-ml-get-categories-request + GetCategoriesRequest request = new GetCategoriesRequest(jobId); // <1> + // end::x-pack-ml-get-categories-request + + // tag::x-pack-ml-get-categories-category-id + request.setCategoryId(1L); // <1> + //
end::x-pack-ml-get-categories-category-id + + // tag::x-pack-ml-get-categories-page + request.setPageParams(new PageParams(100, 200)); // <1> + // end::x-pack-ml-get-categories-page + + // Set page params back to null so the response contains the category we indexed + request.setPageParams(null); + + // tag::x-pack-ml-get-categories-execute + GetCategoriesResponse response = client.machineLearning().getCategories(request, RequestOptions.DEFAULT); + // end::x-pack-ml-get-categories-execute + + // tag::x-pack-ml-get-categories-response + long count = response.count(); // <1> + List<CategoryDefinition> categories = response.categories(); // <2> + // end::x-pack-ml-get-categories-response + assertEquals(1, categories.size()); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(jobId); + + // tag::x-pack-ml-get-categories-listener + ActionListener<GetCategoriesResponse> listener = + new ActionListener<GetCategoriesResponse>() { + @Override + public void onResponse(GetCategoriesResponse getCategoriesResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-categories-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-categories-execute-async + client.machineLearning().getCategoriesAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-categories-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java new file mode 100644 index 00000000000..ad012277711 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.JobTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class DeleteForecastRequestTests extends AbstractXContentTestCase<DeleteForecastRequest> { + + @Override + protected DeleteForecastRequest createTestInstance() { + + DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(JobTests.randomValidJobId()); + if (randomBoolean()) { + int length = randomInt(10); + List<String> ids = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + ids.add(randomAlphaOfLength(10)); + } + deleteForecastRequest.setForecastIds(ids); + } + if (randomBoolean()) { + deleteForecastRequest.setAllowNoForecasts(randomBoolean()); + } + if (randomBoolean()) { + deleteForecastRequest.timeout(randomTimeValue()); + } + return deleteForecastRequest; + } + + @Override + protected DeleteForecastRequest doParseInstance(XContentParser parser) throws IOException { + return DeleteForecastRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java new file mode 100644 index 00000000000..7d9fe2b238f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class GetCategoriesRequestTests extends AbstractXContentTestCase<GetCategoriesRequest> { + + @Override + protected GetCategoriesRequest createTestInstance() { + GetCategoriesRequest request = new GetCategoriesRequest(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setCategoryId(randomNonNegativeLong()); + } else { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + return request; + } + + @Override + protected GetCategoriesRequest doParseInstance(XContentParser parser) throws IOException { + return GetCategoriesRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java new file mode 100644 index 00000000000..e8718ba20e9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.CategoryDefinition; +import org.elasticsearch.client.ml.job.results.CategoryDefinitionTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetCategoriesResponseTests extends AbstractXContentTestCase<GetCategoriesResponse> { + + @Override + protected GetCategoriesResponse createTestInstance() { + String jobId = randomAlphaOfLength(20); + int listSize = randomInt(10); + List<CategoryDefinition> categories = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + CategoryDefinition category = CategoryDefinitionTests.createTestInstance(jobId); + categories.add(category); + } + return new GetCategoriesResponse(categories, listSize); + } + + @Override + protected GetCategoriesResponse doParseInstance(XContentParser parser) throws IOException { + return GetCategoriesResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java new file mode 100644 index 00000000000..5af30d32574 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + + +public class PutDatafeedRequestTests extends AbstractXContentTestCase<PutDatafeedRequest> { + + @Override + protected PutDatafeedRequest createTestInstance() { + return new PutDatafeedRequest(DatafeedConfigTests.createRandom()); + } + + @Override + protected PutDatafeedRequest doParseInstance(XContentParser parser) { + return new PutDatafeedRequest(DatafeedConfig.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java new file mode 100644 index 00000000000..5b2428167b9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.function.Predicate; + +public class PutDatafeedResponseTests extends AbstractXContentTestCase<PutDatafeedResponse> { + + @Override + protected PutDatafeedResponse createTestInstance() { + return new PutDatafeedResponse(DatafeedConfigTests.createRandom()); + } + + @Override + protected PutDatafeedResponse doParseInstance(XContentParser parser) throws IOException { + return PutDatafeedResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java index 8ed51415521..3a7910ad732 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.client.ml.datafeed; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -27,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -36,19 +34,26 @@ import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> { @Override protected DatafeedConfig createTestInstance() { + return createRandom(); + } + + public static DatafeedConfig createRandom() { long bucketSpanMillis = 3600000; DatafeedConfig.Builder builder = constructBuilder(); builder.setIndices(randomStringList(1, 10)); builder.setTypes(randomStringList(0, 10)); if (randomBoolean()) { - builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10),
randomAlphaOfLength(10))); + try { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } catch (IOException e) { + throw new RuntimeException("Failed to serialize query", e); + } } boolean addScriptFields = randomBoolean(); if (addScriptFields) { @@ -72,7 +77,11 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> public static List<String> randomStringList(int min, int max) { int size = scaledRandomIntBetween(min, max); List<String> list = new ArrayList<>(); @@ -150,21 +153,6 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null)); } - public void testCheckValid_GivenNullIndices() { - DatafeedConfig.Builder conf = constructBuilder(); - expectThrows(NullPointerException.class, () -> conf.setIndices(null)); - } - - public void testCheckValid_GivenNullType() { - DatafeedConfig.Builder conf = constructBuilder(); - expectThrows(NullPointerException.class, () -> conf.setTypes(null)); - } - - public void testCheckValid_GivenNullQuery() { - DatafeedConfig.Builder conf = constructBuilder(); - expectThrows(NullPointerException.class, () -> conf.setQuery(null)); - } - public static String randomValidDatafeedId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java index 3dddad3c016..1c3723fd0a6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java @@ -18,19 +18,16 @@ */ package org.elasticsearch.client.ml.datafeed; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.AbstractXContentTestCase; +import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> { @@ -54,7 +51,11 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> if (randomBoolean()) { - builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + try { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } catch (IOException e) { + throw new RuntimeException("Failed to serialize query", e); + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java public class CategoryDefinitionTests extends AbstractXContentTestCase<CategoryDefinition> { - public CategoryDefinition createTestInstance(String jobId) { + public static CategoryDefinition createTestInstance(String jobId) { CategoryDefinition categoryDefinition = new CategoryDefinition(jobId); categoryDefinition.setCategoryId(randomLong()); categoryDefinition.setTerms(randomAlphaOfLength(10)); diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java index 13128b9478e..2d57644f9a7 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java @@ -17,7 +17,7 @@ * under the
License. */ -package org.elasticsearch.unconfigurednodename; +package org.elasticsearch.test.rest; import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; diff --git a/docs/build.gradle b/docs/build.gradle index c6a7a8d4837..f2a7f8511e3 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -57,6 +57,8 @@ integTestCluster { // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults systemProperty 'es.scripting.use_java_time', 'false' systemProperty 'es.scripting.update.ctx_in_params', 'false' + //TODO: remove this once the cname is prepended to the address by default in 7.0 + systemProperty 'es.http.cname_in_publish_address', 'true' } // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed diff --git a/docs/java-rest/high-level/ml/delete-forecast.asciidoc b/docs/java-rest/high-level/ml/delete-forecast.asciidoc new file mode 100644 index 00000000000..09aa5c734ff --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-forecast.asciidoc @@ -0,0 +1,78 @@ +[[java-rest-high-x-pack-ml-delete-forecast]] +=== Delete Forecast API + +The Delete Forecast API deletes one or more of a {ml} job's +forecasts from the cluster. +It accepts a `DeleteForecastRequest` object and responds +with an `AcknowledgedResponse` object. + +[[java-rest-high-x-pack-ml-delete-forecast-request]] +==== Delete Forecast Request + +A `DeleteForecastRequest` object is created with an existing non-null `jobId`. +All other fields are optional for the request. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments + +The following arguments are optional. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request-options] +-------------------------------------------------- +<1> Sets the specific forecast IDs to delete; use `_all` to target all forecasts for the given +`jobId` +<2> Sets the timeout for the request to respond; defaults to 30 seconds +<3> Sets the `allow_no_forecasts` option. When `true`, no error is returned if an `_all` +request finds no forecasts. Defaults to `true` + +[[java-rest-high-x-pack-ml-delete-forecast-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
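+For orientation, the snippet below is a minimal, hedged sketch of the
+synchronous flow rather than the canonical doc snippet: the job id `"my-job"`
+and forecast id `"forecast-1"` are illustrative placeholders, and `client` is
+assumed to be an already initialized `RestHighLevelClient`.
+
+["source","java"]
+--------------------------------------------------
+// Delete one specific forecast of job "my-job" and inspect the acknowledgement.
+// To remove every forecast of a job instead, build the request with
+// DeleteForecastRequest.deleteAllForecasts("my-job").
+DeleteForecastRequest request = new DeleteForecastRequest("my-job");
+request.setForecastIds("forecast-1");
+AcknowledgedResponse response =
+    client.machineLearning().deleteForecast(request, RequestOptions.DEFAULT);
+boolean deleted = response.isAcknowledged(); // true when the deletion was acknowledged
+--------------------------------------------------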
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-delete-forecast-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute-async] +-------------------------------------------------- +<1> The `DeleteForecastRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `AcknowledgedResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-delete-forecast-response]] +==== Delete Forecast Response + +An `AcknowledgedResponse` contains an acknowledgement of the forecast(s) deletion + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-response] +-------------------------------------------------- +<1> `isAcknowledged()` indicates if the forecast was successfully deleted or not. diff --git a/docs/java-rest/high-level/ml/get-categories.asciidoc b/docs/java-rest/high-level/ml/get-categories.asciidoc new file mode 100644 index 00000000000..0e86a2b7f33 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-categories.asciidoc @@ -0,0 +1,83 @@ +[[java-rest-high-x-pack-ml-get-categories]] +=== Get Categories API + +The Get Categories API retrieves one or more category results. +It accepts a `GetCategoriesRequest` object and responds +with a `GetCategoriesResponse` object. + +[[java-rest-high-x-pack-ml-get-categories-request]] +==== Get Categories Request + +A `GetCategoriesRequest` object gets created with an existing non-null `jobId`. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-request] +-------------------------------------------------- +<1> Constructing a new request referencing an existing `jobId` + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-category-id] +-------------------------------------------------- +<1> The id of the category to get. Otherwise it will return all categories. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. 
`from` specifies the number of categories to skip. +`size` specifies the maximum number of categories to get. They default to `0` and `100` respectively. + +[[java-rest-high-x-pack-ml-get-categories-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearning()` method. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute] +-------------------------------------------------- + + +[[java-rest-high-x-pack-ml-get-categories-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute-async] +-------------------------------------------------- +<1> The `GetCategoriesRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back with the `onResponse` method +if the execution is successful or the `onFailure` method if the execution +failed. + +A typical listener for `GetCategoriesResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-get-categories-response]] +==== Get Categories Response + +The returned `GetCategoriesResponse` contains the requested categories: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-response] +-------------------------------------------------- +<1> The count of categories that were matched +<2> The categories retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc new file mode 100644 index 00000000000..86c9d631726 --- /dev/null +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -0,0 +1,124 @@ +[[java-rest-high-x-pack-ml-put-datafeed]] +=== Put Datafeed API + +The Put Datafeed API can be used to create a new {ml} datafeed +in the cluster. The API accepts a `PutDatafeedRequest` object +as a request and returns a `PutDatafeedResponse`. + +[[java-rest-high-x-pack-ml-put-datafeed-request]] +==== Put Datafeed Request + +A `PutDatafeedRequest` requires the following argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-request] +-------------------------------------------------- +<1> The configuration of the {ml} datafeed to create + +[[java-rest-high-x-pack-ml-put-datafeed-config]] +==== Datafeed Configuration + +The `DatafeedConfig` object contains all the details about the {ml} datafeed +configuration.
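+Before the argument-by-argument breakdown below, here is a hedged,
+self-contained sketch of how a `DatafeedConfig` is typically assembled
+through its builder. The ids `"datafeed-1"` and `"job-1"` and the index name
+`"my-index"` are illustrative placeholders, not values required by the API:
+
+["source","java"]
+--------------------------------------------------
+// Build a datafeed that reads "my-index" and feeds job "job-1",
+// querying one minute behind real time every 30 seconds.
+DatafeedConfig datafeed = new DatafeedConfig.Builder("datafeed-1", "job-1")
+    .setIndices("my-index")                       // the indices to read from
+    .setQueryDelay(TimeValue.timeValueMinutes(1)) // optional
+    .setFrequency(TimeValue.timeValueSeconds(30)) // optional
+    .build();
+--------------------------------------------------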
+ +A `DatafeedConfig` requires the following arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config] +-------------------------------------------------- +<1> The datafeed ID and the job ID +<2> The indices that contain the data to retrieve and feed into the job + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-chunking-config] +-------------------------------------------------- +<1> Specifies how data searches are split into time chunks. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-frequency] +-------------------------------------------------- +<1> The interval at which scheduled queries are made while the datafeed runs in real time. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query] +-------------------------------------------------- +<1> A query to filter the search results by. Defaults to the `match_all` query. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query-delay] +-------------------------------------------------- +<1> The delay behind real time at which data is queried. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-script-fields] +-------------------------------------------------- +<1> Allows the use of script fields. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-scroll-size] +-------------------------------------------------- +<1> The `size` parameter used in the searches. + +[[java-rest-high-x-pack-ml-put-datafeed-execution]] +==== Execution + +The Put Datafeed API can be executed through a `MachineLearningClient` +instance. Such an instance can be retrieved from a `RestHighLevelClient` +using the `machineLearning()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-put-datafeed-response]] +==== Response + +The returned `PutDatafeedResponse` contains the full representation of +the new {ml} datafeed if it has been successfully created.
This will +contain the creation time and other fields initialized using +default values: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-response] +-------------------------------------------------- +<1> The created datafeed + +[[java-rest-high-x-pack-ml-put-datafeed-async]] +==== Asynchronous Execution + +This request can be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-async] +-------------------------------------------------- +<1> The `PutDatafeedRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutDatafeedResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument diff --git a/docs/java-rest/high-level/ml/put-job.asciidoc b/docs/java-rest/high-level/ml/put-job.asciidoc index d51bb63d405..8c726d63b16 100644 --- a/docs/java-rest/high-level/ml/put-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-job.asciidoc @@ -142,7 +142,7 @@ This request can be executed asynchronously: -------------------------------------------------- include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-job-execute-async] -------------------------------------------------- -<1> The `PutMlJobRequest` to execute and the `ActionListener` to use when +<1> The `PutJobRequest` to execute and the `ActionListener` to use when the execution completes The asynchronous method does not block and returns immediately. 
Once it is diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 8d92653ce57..0be681a14d1 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -220,12 +220,15 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <> * <> * <> +* <> * <> +* <> * <> * <> * <> * <> * <> +* <> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -234,13 +237,16 @@ include::ml/open-job.asciidoc[] include::ml/close-job.asciidoc[] include::ml/update-job.asciidoc[] include::ml/flush-job.asciidoc[] +include::ml/put-datafeed.asciidoc[] include::ml/get-job-stats.asciidoc[] include::ml/forecast-job.asciidoc[] +include::ml/delete-forecast.asciidoc[] include::ml/get-buckets.asciidoc[] include::ml/get-overall-buckets.asciidoc[] include::ml/get-records.asciidoc[] include::ml/post-data.asciidoc[] include::ml/get-influencers.asciidoc[] +include::ml/get-categories.asciidoc[] == Migration APIs diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 3bfa8d91f8b..0726f5f927e 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -348,7 +348,7 @@ GET /_search \... will sort the composite bucket in descending order when comparing values from the `date_histogram` source and in ascending order when comparing values from the `terms` source. -====== Missing bucket +==== Missing bucket By default documents without a value for a given source are ignored. It is possible to include them in the response by setting `missing_bucket` to diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index f531bc5d0e9..41bb9d38afb 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -37,6 +37,8 @@ include::tokenfilters/multiplexer-tokenfilter.asciidoc[] include::tokenfilters/condition-tokenfilter.asciidoc[] +include::tokenfilters/predicate-tokenfilter.asciidoc[] + include::tokenfilters/stemmer-tokenfilter.asciidoc[] include::tokenfilters/stemmer-override-tokenfilter.asciidoc[] diff --git a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc new file mode 100644 index 00000000000..bebf7bd80f2 --- /dev/null +++ b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc @@ -0,0 +1,79 @@ +[[analysis-predicatefilter-tokenfilter]] +=== Predicate Token Filter Script + +The predicate_token_filter token filter takes a predicate script, and removes tokens that do +not match the predicate. + +[float] +=== Options +[horizontal] +script:: a predicate script that determines whether or not the current token will +be emitted. Note that only inline scripts are supported. 
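+ +The script operates on one token at a time. As a rough sketch of what the +predicate can inspect, the accessors below mirror the +`AnalysisPredicateScript.Token` class introduced elsewhere in this change; +the wrapper method itself is hypothetical: + +[source,java] +-------------------------------------------------- +// Hypothetical helper naming the accessors a predicate script can call on `token`. +boolean accept(AnalysisPredicateScript.Token token) { +    return token.getTerm().length() > 5   // the term's characters +        && token.getPosition() >= 0       // 0-based position, as in _analyze output +        && token.isKeyword() == false;    // keyword flag from KeywordAttribute +} +--------------------------------------------------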
+ +[float] +=== Settings example + +You can set it up like: + +[source,js] +-------------------------------------------------- +PUT /condition_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "my_analyzer" : { + "tokenizer" : "standard", + "filter" : [ "my_script_filter" ] + } + }, + "filter" : { + "my_script_filter" : { + "type" : "predicate_token_filter", + "script" : { + "source" : "token.getTerm().length() > 5" <1> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> This will emit tokens that are more than 5 characters long. + +And test it like: + +[source,js] +-------------------------------------------------- +POST /condition_example/_analyze +{ + "analyzer" : "my_analyzer", + "text" : "What Flapdoodle" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "Flapdoodle", <1> + "start_offset": 5, + "end_offset": 15, + "type": "<ALPHANUM>", + "position": 1 <2> + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +<1> The token 'What' has been removed from the tokenstream because it does not +match the predicate. +<2> The position and offset values are unaffected by the removal of earlier tokens. \ No newline at end of file diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 7826afc05fa..9fb875da382 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -40,3 +40,16 @@ will be removed in the future, thus requiring HTTP to always be enabled. This setting has been removed, as disabling http pipelining support on the server provided little value. The setting `http.pipelining.max_events` can still be used to limit the number of pipelined requests in-flight. + +==== Cross-cluster search settings renamed + +The cross-cluster search remote cluster connection infrastructure is also used +in cross-cluster replication. This means that the setting names +`search.remote.*` used for configuring cross-cluster search belie the fact that +they also apply to other situations where a connection to a remote cluster is +used. Therefore, these settings have been renamed from `search.remote.*` to +`cluster.remote.*`. For backwards compatibility purposes, we will fall back to +`search.remote.*` if `cluster.remote.*` is not set. For any such settings stored +in the cluster state, or set on dynamic settings updates, we will automatically +upgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback +settings will be removed in 8.0.0. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 0562a677a8d..ba6adf1d35f 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -207,6 +207,51 @@ repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydo URL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting similar to shared file system repository. +[float] +[role="xpack"] +[testenv="basic"] +===== Source Only Repository + +A source repository enables you to create minimal, source-only snapshots that take up to 50% less space on disk. +Source only snapshots contain stored fields and index metadata.
They do not include index or doc values structures +and are not searchable when restored. After restoring a source-only snapshot, you must <> +the data into a new index. + +Source repositories delegate to another snapshot repository for storage. + + +[IMPORTANT] +================================================== + +Source only snapshots are only supported if the `_source` field is enabled and no source-filtering is applied. +When you restore a source only snapshot: + + * The restored index is read-only and can only serve `match_all` search or scroll requests to enable reindexing. + + * Queries other than `match_all` and `_get` requests are not supported. + + * The mapping of the restored index is empty, but the original mapping is available from the types top + level `meta` element. + +================================================== + +When you create a source repository, you must specify the type and name of the delegate repository +where the snapshots will be stored: + +[source,js] +----------------------------------- +PUT _snapshot/my_src_only_repository +{ + "type": "source", + "settings": { + "delegate_type": "fs", + "location": "my_backup_location" + } +} +----------------------------------- +// CONSOLE +// TEST[continued] + [float] ===== Repository plugins diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index acff4d3b036..5c397d603be 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -172,7 +172,7 @@ GET /_search The example above creates a boolean query: -`(ny OR (new AND york)) city)` +`(ny OR (new AND york)) city` that matches documents with the term `ny` or the conjunction `new AND york`. By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`. diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 2b522062ec0..ab52097a4c5 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -13,6 +13,9 @@ Every context mapping has a unique name and a type. There are two types: `catego and `geo`. Context mappings are configured under the `contexts` parameter in the field mapping. +NOTE: It is mandatory to provide a context when indexing and querying + a context enabled completion field. + The following defines types, each with two context mappings for a completion field: @@ -84,10 +87,6 @@ PUT place_path_category NOTE: Adding context mappings increases the index size for completion fields. The completion index is entirely heap resident, so you can monitor the completion field index size using <>. -NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context enabled completion field is deprecated -and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should -add a special context for it.] - [[suggester-context-category]] [float] ==== Category Context @@ -160,9 +159,9 @@ POST place/_search?pretty // CONSOLE // TEST[continued] -Note: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered. -Querying with no categories on a category enabled completion field is deprecated and will be removed in the next major release -as it degrades search performance considerably.] +NOTE: If multiple categories or category contexts are set on the query +they are merged as a disjunction.
This means that suggestions match +if they contain at least one of the provided context values. Suggestions with certain categories can be boosted higher than others. The following filters suggestions by categories and additionally boosts @@ -218,6 +217,9 @@ multiple category context clauses. The following parameters are supported for a so on, by specifying a category prefix of 'type'. Defaults to `false` +NOTE: If a suggestion entry matches multiple contexts the final score is computed as the +maximum score produced by any matching contexts. + [[suggester-context-geo]] [float] ==== Geo location Context @@ -307,6 +309,10 @@ POST place/_search NOTE: When a location with a lower precision at query time is specified, all suggestions that fall within the area will be considered. +NOTE: If multiple categories or category contexts are set on the query +they are merged as a disjunction. This means that suggestions match +if they contain at least one of the provided context values. + Suggestions that are within an area represented by a geohash can also be boosted higher than others, as shown by the following: @@ -349,6 +355,9 @@ POST place/_search?pretty that fall under the geohash representation of '(43.6624803, -79.3863353)' with a default precision of '6' by a factor of `2` +NOTE: If a suggestion entry matches multiple contexts the final score is computed as the +maximum score produced by any matching contexts. + In addition to accepting context values, a context query can be composed of multiple context clauses. The following parameters are supported for a `category` context clause: diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index f2e9077e20e..dffdc48fe7b 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -295,8 +295,9 @@ as _properties_ within Windows Installer documentation) that can be passed to `m `SKIPSETTINGPASSWORDS`:: - When installing with a `Trial` license and X-Pack Security enabled, whether the - installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`. + When installing with a `Trial` license and {security} enabled, whether the + installation should skip setting up the built-in users `elastic`, `kibana`, + `logstash_system`, `apm_system`, and `beats_system`. Defaults to `false` `ELASTICUSERPASSWORD`:: diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java index 67663516167..493d809f9dc 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java @@ -20,6 +20,7 @@ package org.elasticsearch.core.internal.io; import java.io.Closeable; import java.io.IOException; import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.nio.file.FileVisitResult; import java.nio.file.FileVisitor; import java.nio.file.Files; @@ -36,6 +37,14 @@ import java.util.Map; */ public final class IOUtils { + /** + * UTF-8 charset string. + *

Where possible, use {@link StandardCharsets#UTF_8} instead, + * as using the String constant may slow things down. + * @see StandardCharsets#UTF_8 + */ + public static final String UTF_8 = StandardCharsets.UTF_8.name(); + private IOUtils() { // Static utils methods } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java index 7de588a958c..3bda6f393bf 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java @@ -19,6 +19,13 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; +import org.apache.lucene.analysis.tokenattributes.TypeAttribute; +import org.apache.lucene.util.AttributeSource; import org.elasticsearch.script.ScriptContext; /** @@ -30,21 +37,40 @@ public abstract class AnalysisPredicateScript { * Encapsulation of the state of the current token */ public static class Token { - public CharSequence term; - public int pos; - public int posInc; - public int posLen; - public int startOffset; - public int endOffset; - public String type; - public boolean isKeyword; + + private final CharTermAttribute termAtt; + private final PositionIncrementAttribute posIncAtt; + private final PositionLengthAttribute posLenAtt; + private final OffsetAttribute offsetAtt; + private final TypeAttribute typeAtt; + private final KeywordAttribute keywordAtt; + + // posInc is always 1 at the beginning of a tokenstream and the convention + // from the _analyze endpoint is that tokenstream positions are 0-based + private int pos = -1; + + /** + * Create a token exposing values from an AttributeSource + */ + public Token(AttributeSource source) { + this.termAtt = source.addAttribute(CharTermAttribute.class); + this.posIncAtt = source.addAttribute(PositionIncrementAttribute.class); + this.posLenAtt = source.addAttribute(PositionLengthAttribute.class); + this.offsetAtt = source.addAttribute(OffsetAttribute.class); + this.typeAtt = source.addAttribute(TypeAttribute.class); + this.keywordAtt = source.addAttribute(KeywordAttribute.class); + } + + public void updatePosition() { + this.pos = this.pos + posIncAtt.getPositionIncrement(); + } public CharSequence getTerm() { - return term; + return termAtt; } public int getPositionIncrement() { - return posInc; + return posIncAtt.getPositionIncrement(); } public int getPosition() { @@ -52,23 +78,23 @@ public abstract class AnalysisPredicateScript { } public int getPositionLength() { - return posLen; + return posLenAtt.getPositionLength(); } public int getStartOffset() { - return startOffset; + return offsetAtt.startOffset(); } public int getEndOffset() { - return endOffset; + return offsetAtt.endOffset(); } public String getType() { - return type; + return typeAtt.type(); } public boolean isKeyword() { - return isKeyword; + return keywordAtt.isKeyword(); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 75ebade0b12..175935258ad 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -264,6 +264,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); + filters.put("predicate_token_filter", + requiresAnalysisSettings((i, e, n, s) -> new PredicateTokenFilterScriptFactory(i, n, s, scriptService.get()))); filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new); filters.put("reverse", ReverseTokenFilterFactory::new); filters.put("russian_stem", RussianStemTokenFilterFactory::new); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java new file mode 100644 index 00000000000..84f4bb48706 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.FilteringTokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; + +import java.io.IOException; + +/** + * A factory for creating FilteringTokenFilters that determine whether or not to + * accept their underlying token by consulting a script + */ +public class PredicateTokenFilterScriptFactory extends AbstractTokenFilterFactory { + + private final AnalysisPredicateScript.Factory factory; + + public PredicateTokenFilterScriptFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) { + super(indexSettings, name, settings); + Settings scriptSettings = settings.getAsSettings("script"); + Script script = Script.parse(scriptSettings); + if (script.getType() != ScriptType.INLINE) { + throw new IllegalArgumentException("Cannot use stored scripts in tokenfilter [" + name + "]"); + } + this.factory = scriptService.compile(script, AnalysisPredicateScript.CONTEXT); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new ScriptFilteringTokenFilter(tokenStream, factory.newInstance()); + } + + private static class ScriptFilteringTokenFilter extends FilteringTokenFilter { + + final AnalysisPredicateScript script; + final AnalysisPredicateScript.Token token; + + ScriptFilteringTokenFilter(TokenStream in, AnalysisPredicateScript script) { + super(in); + this.script = script; + this.token = new AnalysisPredicateScript.Token(this); + } + + @Override + protected boolean accept() throws IOException { + token.updatePosition(); + return script.execute(token); + } + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java index cf7fd5b047a..56f60bb874a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java @@ -21,12 +21,6 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; @@ -36,6 +30,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -76,30 +71,26 @@ public class ScriptedConditionTokenFilterFactory 
extends AbstractTokenFilterFact } return in; }; - AnalysisPredicateScript script = factory.newInstance(); - final AnalysisPredicateScript.Token token = new AnalysisPredicateScript.Token(); - return new ConditionalTokenFilter(tokenStream, filter) { + return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance()); + } - CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); - PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); - OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - TypeAttribute typeAtt = addAttribute(TypeAttribute.class); - KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class); + private static class ScriptedConditionTokenFilter extends ConditionalTokenFilter { - @Override - protected boolean shouldFilter() { - token.term = termAtt; - token.posInc = posIncAtt.getPositionIncrement(); - token.pos += token.posInc; - token.posLen = posLenAtt.getPositionLength(); - token.startOffset = offsetAtt.startOffset(); - token.endOffset = offsetAtt.endOffset(); - token.type = typeAtt.type(); - token.isKeyword = keywordAtt.isKeyword(); - return script.execute(token); - } - }; + private final AnalysisPredicateScript script; + private final AnalysisPredicateScript.Token token; + + ScriptedConditionTokenFilter(TokenStream input, Function<TokenStream, TokenStream> inputFactory, + AnalysisPredicateScript script) { + super(input, inputFactory); + this.script = script; + this.token = new AnalysisPredicateScript.Token(this); + } + + @Override + protected boolean shouldFilter() throws IOException { + token.updatePosition(); + return script.execute(token); + } } @Override diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java new file mode 100644 index 00000000000..18afbdcecb3 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.util.Collections; + +public class PredicateTokenScriptFilterTests extends ESTokenStreamTestCase { + + public void testSimpleFilter() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.f.type", "predicate_token_filter") + .put("index.analysis.filter.f.script.source", "token.getTerm().length() > 5") + .put("index.analysis.analyzer.myAnalyzer.type", "custom") + .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.myAnalyzer.filter", "f") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + AnalysisPredicateScript.Factory factory = () -> new AnalysisPredicateScript() { + @Override + public boolean execute(Token token) { + return token.getTerm().length() > 5; + } + }; + + @SuppressWarnings("unchecked") + ScriptService scriptService = new ScriptService(indexSettings, Collections.emptyMap(), Collections.emptyMap()){ + @Override + public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) { + assertEquals(context, AnalysisPredicateScript.CONTEXT); + assertEquals(new Script("token.getTerm().length() > 5"), script); + return (FactoryType) factory; + } + }; + + CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + plugin.createComponents(null, null, null, null, scriptService, null, null, null, null); + AnalysisModule module + = new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(plugin)); + + IndexAnalyzers analyzers = module.getAnalysisRegistry().build(idxSettings); + + try (NamedAnalyzer analyzer = analyzers.get("myAnalyzer")) { + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "Vorsprung Durch Technik", new String[]{ + "Vorsprung", "Technik" + }); + } + + } + +} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml index 4305e5db0af..2015fe31fcc 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml @@ -28,9 +28,44 @@ - type: condition filter: [ "lowercase" ] script: - source: "token.position > 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)" + source: "token.position >= 1 &&
token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)" - length: { tokens: 3 } - match: { tokens.0.token: "Vorsprung" } - match: { tokens.1.token: "durch" } - match: { tokens.2.token: "technik" } + +--- +"script_filter": + - do: + indices.analyze: + body: + text: "Vorsprung Durch Technik" + tokenizer: "whitespace" + filter: + - type: predicate_token_filter + script: + source: "token.term.length() > 5" + + - length: { tokens: 2 } + - match: { tokens.0.token: "Vorsprung" } + - match: { tokens.1.token: "Technik" } + +--- +"script_filter_position": + - do: + indices.analyze: + body: + text: "a b c d e f g h" + tokenizer: "whitespace" + filter: + - type: predicate_token_filter + script: + source: "token.position >= 4" + + - length: { tokens: 4 } + - match: { tokens.0.token: "e" } + - match: { tokens.1.token: "f" } + - match: { tokens.2.token: "g" } + - match: { tokens.3.token: "h" } + diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index ed4b1d631e0..6bec6f50626 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -26,6 +26,7 @@ integTestCluster { module project.project(':modules:mapper-extras') systemProperty 'es.scripting.use_java_time', 'true' systemProperty 'es.scripting.update.ctx_in_params', 'false' + systemProperty 'es.http.cname_in_publish_address', 'true' } dependencies { diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 5d248b22caf..c56a9a8259a 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -32,19 +32,23 @@ esplugin { } versions << [ - 'aws': '1.11.223' + 'aws': '1.11.406' ] dependencies { compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}" compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}" compile "com.amazonaws:aws-java-sdk-core:${versions.aws}" + compile "com.amazonaws:jmespath-java:${versions.aws}" compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + compile 'joda-time:joda-time:2.10' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, // and whitelist this hack in JarHell @@ -53,6 +57,7 @@ dependencies { dependencyLicenses { mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk' + mapping from: /jmespath-java.*/, to: 'aws-java-sdk' mapping from: /jackson-.*/, to: 'jackson' mapping from: /jaxb-.*/, to: 'jaxb' } diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 deleted file mode 100644 index 9890dd8d600..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c3993cb44f5856fa721b7b7ccfc266377c0bf9c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 new file mode 100644 index 00000000000..415373b275e --- 
/dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 @@ -0,0 +1 @@ +43f3b7332d4d527bbf34d4ac6be094f3dabec6de \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 deleted file mode 100644 index d5bc9d30308..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c24e6ebe108c60a08098aeaad5ae0b6a5a77b618 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 new file mode 100644 index 00000000000..f0eb9b71752 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 @@ -0,0 +1 @@ +e29854e58dc20f5453c1da7e580a5921b1e9714a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 deleted file mode 100644 index fe12b2d4847..00000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef96732e22d97952fbcd0a94f1dc376d157eda \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 new file mode 100644 index 00000000000..e57fd11c829 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 @@ -0,0 +1 @@ +5c3c2c57b076602b3aeef841c63e5848ec52b00d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 new file mode 100644 index 00000000000..bbb9b562a2f --- /dev/null +++ b/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 @@ -0,0 +1 @@ +06c291d1029943d4968a36fadffa3b71a6d8b4e4 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index b177686bd71..a431f4da1fd 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -23,10 +23,12 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.internal.Constants; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; @@ -93,19 +95,26 @@ class S3Service extends AbstractComponent implements Closeable { } } - private AmazonS3 buildClient(S3ClientSettings clientSettings) { - final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); - final ClientConfiguration configuration = buildConfiguration(clientSettings); - final AmazonS3 client = buildClient(credentials, configuration); - if 
(Strings.hasText(clientSettings.endpoint)) { - client.setEndpoint(clientSettings.endpoint); - } - return client; - } - // proxy for testing - AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { - return new AmazonS3Client(credentials, configuration); + AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); + builder.withCredentials(buildCredentials(logger, clientSettings)); + builder.withClientConfiguration(buildConfiguration(clientSettings)); + + final String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; + logger.debug("using endpoint [{}]", endpoint); + + // If the endpoint configuration isn't set on the builder then the default behaviour is to try + // and work out what region we are in and use an appropriate endpoint - see AwsClientBuilder#setRegion. + // In contrast, directly-constructed clients use s3.amazonaws.com unless otherwise instructed. We currently + // use a directly-constructed client, and need to keep the existing behaviour to avoid a breaking change, + // so to move to using the builder we must set it explicitly to keep the existing behaviour. + // + // We do this because directly constructing the client is deprecated (was already deprecated in 1.11.223 too) + // so this change removes that usage of a deprecated API. + builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null)); + + return builder.build(); } // pkg private for tests diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 7eb603b4b78..17797a57583 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; @@ -70,9 +69,9 @@ public class RepositoryCredentialsTests extends ESTestCase { } @Override - AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { - final AmazonS3 client = super.buildClient(credentials, configuration); - return new ClientAndCredentials(client, credentials); + AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3 client = super.buildClient(clientSettings); + return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); } } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index ca8371e30e7..8b305462e4d 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -53,9 +53,6 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' - // debug logging for testRecovery - setting 'logger.level', 'DEBUG' - if (version.onOrAfter('5.3.0')) { setting 'http.content_type.required', 'true' } @@ -75,9 +72,6 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' - //
debug logging for testRecovery - setting 'logger.level', 'DEBUG' - numNodes = 2 dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } cleanShared = false // We want to keep snapshots made by the old cluster! diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 80bed9db5f3..7efebd1d54a 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -68,10 +68,8 @@ import static org.hamcrest.Matchers.notNullValue; * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. */ -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private String index; @@ -81,29 +79,9 @@ public class FullClusterRestartIT extends ESRestTestCase { index = getTestName().toLowerCase(Locale.ROOT); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testSearch() throws Exception { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -169,7 +147,7 @@ public class FullClusterRestartIT extends ESRestTestCase { } public void testNewReplicasWork() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -237,10 +215,10 @@ public class FullClusterRestartIT extends ESRestTestCase { */ public void testAliasWithBadName() throws Exception { assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", - oldClusterVersion.before(VERSION_5_1_0_UNRELEASED)); + getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED)); int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -291,7 +269,7 @@ public class FullClusterRestartIT extends ESRestTestCase { Map<String, Object> searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); assertEquals(count, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { // We can remove the alias.
Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); assertEquals(200, response.getStatusLine().getStatusCode()); @@ -302,7 +280,7 @@ } public void testClusterState() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); mappingsAndSettings.field("template", index); @@ -341,14 +319,14 @@ assertEquals("0", numberOfReplicas); Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." + index + ".settings.index.version.created", clusterState))); - assertEquals(oldClusterVersion, version); + assertEquals(getOldClusterVersion(), version); } public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -413,7 +391,7 @@ public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -465,7 +443,7 @@ int totalHits = (int) XContentMapValues.extractValue("hits.total", response); assertEquals(numDocs, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); assertNoFailures(response); totalShards = (int) XContentMapValues.extractValue("_shards.total", response); @@ -490,7 +468,7 @@ * */ public void testRollover() throws IOException { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createIndex = new Request("PUT", "/" + index + "-000001"); createIndex.setJsonEntity("{" + " \"aliases\": {" @@ -511,7 +489,7 @@ bulkRequest.addParameter("refresh", ""); assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); rolloverRequest.setJsonEntity("{" + " \"conditions\": {" @@ -529,7 +507,7 @@ Map<String, Object> count = entityAsMap(client().performRequest(countRequest)); assertNoFailures(count); - int expectedCount = bulkCount + (runningAgainstOldCluster ? 0 : bulkCount); + int expectedCount = bulkCount + (isRunningAgainstOldCluster() ?
0 : bulkCount); assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count)); } @@ -688,7 +666,7 @@ String docLocation = "/" + index + "/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.setJsonEntity(doc); client().performRequest(createDoc); @@ -703,7 +681,7 @@ public void testEmptyShard() throws IOException { final String index = "test_empty_shard"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) @@ -726,7 +704,7 @@ public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { count = between(200, 300); /* We've had bugs in the past where we couldn't restore * an index without a translog so we randomize whether * or not we have one. */ @@ -772,7 +750,7 @@ String countResponse = toStr(client().performRequest(countRequest)); assertThat(countResponse, containsString("\"total\":" + count)); - if (false == runningAgainstOldCluster) { + if (false == isRunningAgainstOldCluster()) { boolean restoredFromTranslog = false; boolean foundPrimary = false; Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); @@ -800,7 +778,7 @@ assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); - String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString(); + String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { int numCurrentVersion = 0; int numBwcVersion = 0; @@ -840,7 +818,7 @@ */ public void testSnapshotRestore() throws IOException { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the index count = between(200, 300); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); @@ -860,7 +838,7 @@ // Stick a routing attribute into the cluster settings so we can see it after the restore Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); addRoutingSettings.setJsonEntity( - "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}"); + "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}"); client().performRequest(addRoutingSettings); // Stick a template into the cluster so we can see it after the restore @@ -885,7 +863,7 @@ templateBuilder.startObject("alias2"); { templateBuilder.startObject("filter"); { templateBuilder.startObject("term"); { - templateBuilder.field("version", runningAgainstOldCluster ?
oldClusterVersion : Version.CURRENT); + templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT); } templateBuilder.endObject(); } @@ -898,7 +876,7 @@ createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); client().performRequest(createTemplateRequest); - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the repo XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { repoConfig.field("type", "fs"); @@ -914,19 +892,19 @@ client().performRequest(createRepoRequest); } - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap")); + Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap")); createSnapshot.addParameter("wait_for_completion", "true"); createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); - checkSnapshot("old_snap", count, oldClusterVersion); - if (false == runningAgainstOldCluster) { + checkSnapshot("old_snap", count, getOldClusterVersion()); + if (false == isRunningAgainstOldCluster()) { checkSnapshot("new_snap", count, Version.CURRENT); } } public void testHistoryUUIDIsAdded() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -1019,20 +997,14 @@ Request clusterSettingsRequest = new Request("GET", "/_cluster/settings"); clusterSettingsRequest.addParameter("flat_settings", "true"); Map<String, Object> clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); - Map<String, Object> expectedClusterSettings = new HashMap<>(); - expectedClusterSettings.put("transient", emptyMap()); - expectedClusterSettings.put("persistent", - singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString())); - if (expectedClusterSettings.equals(clusterSettingsResponse) == false) { - NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); - builder.compareMaps(clusterSettingsResponse, expectedClusterSettings); - fail("settings don't match:\n" + builder.toString()); - } + @SuppressWarnings("unchecked") final Map<String, Object> persistentSettings = + (Map<String, Object>) clusterSettingsResponse.get("persistent"); + assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString())); // Check that the template was restored successfully Map<String, Object> getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template"))); Map<String, Object> expectedTemplate = new HashMap<>(); - if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) { + if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) { expectedTemplate.put("template", "evil_*"); } else { expectedTemplate.put("index_patterns", singletonList("evil_*")); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java new file mode 100644 index 00000000000..19fbdc92fae --- /dev/null +++
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.transport.RemoteClusterService; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; +import static org.hamcrest.Matchers.equalTo; + +public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase { + + public void testRemoteClusterSettingsUpgraded() throws IOException { + assumeTrue("settings automatically upgraded since 6.5.0", getOldClusterVersion().before(Version.V_6_5_0)); + if (isRunningAgainstOldCluster()) { + final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("persistent"); + { + builder.field("search.remote.foo.skip_unavailable", true); + builder.field("search.remote.foo.seeds", Collections.singletonList("localhost:9200")); + } + builder.endObject(); + } + builder.endObject(); + putSettingsRequest.setJsonEntity(Strings.toString(builder)); + } + client().performRequest(putSettingsRequest); + + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response response = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + + assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); + assertTrue(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + 
assertThat( + SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + } + + assertSettingDeprecationsAndWarnings(new Setting[]{ + SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo"), + SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo")}); + } else { + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response getSettingsResponse = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + + assertFalse(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue( + settings.toString(), + RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); + assertFalse(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + } + } + } + +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 49a9dec870e..2b7250f86b7 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,10 +20,8 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -48,7 +46,6 @@ import org.elasticsearch.index.query.SpanTermQueryBuilder; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.rest.ESRestTestCase; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -71,7 +68,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; * The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the * json format of a query being tested here changes, feel free to change this.
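The test hinges on version-aware wire serialization: a query builder written with the old cluster's wire version must read back equal. A minimal sketch of that round-trip, with assumed helper names (registry built from SearchModule's named writeables, expected being the query under test, JUnit's assertEquals statically imported):

    // Sketch under assumptions: "registry" and "expected" are illustrative names.
    static void assertWireRoundTrip(QueryBuilder expected, NamedWriteableRegistry registry, Version version) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(version);
            out.writeNamedWriteable(expected);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                in.setVersion(version);
                assertEquals(expected, in.readNamedWriteable(QueryBuilder.class));
            }
        }
    }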
*/ -public class QueryBuilderBWCIT extends ESRestTestCase { +public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); @@ -145,32 +142,9 @@ public class QueryBuilderBWCIT extends ESRestTestCase { CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); } - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testQueryBuilderBWC() throws Exception { String index = "queries"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -230,7 +204,7 @@ public class QueryBuilderBWCIT extends ESRestTestCase { byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { - input.setVersion(oldClusterVersion); + input.setVersion(getOldClusterVersion()); QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); assert in.read() == -1; assertEquals(expectedQueryBuilder, queryBuilder); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 59692873cc4..2725580d9e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -139,12 +139,26 @@ setup: features: warnings - do: warnings: - - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: body: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } +--- +"multiple docvalue_fields": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings + - do: + warnings: + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count, include.field1.keyword]' + search: + body: + docvalue_fields: [ "count", "include.field1.keyword" ] + - match: { hits.hits.0.fields.count: [1] } + --- "docvalue_fields as url param": - skip: @@ -153,7 +167,7 @@ setup: features: warnings - do: warnings: - - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 5459805416e..a9d83cfbce6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; @@ -171,6 +172,12 @@ public class TransportResizeAction extends TransportMasterNodeAction sourceSettingsPredicate = - (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort.")) + (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || + s.startsWith("index.sort.") || s.equals("index.soft_deletes.enabled")) && indexSettingsBuilder.keys().contains(s) == false; builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index e25d954aa4f..e87b3757e6b 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -54,7 +53,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; - private final Map, Function, Map.Entry>> settingUpgraders; + private final Map, SettingUpgrader> settingUpgraders; private final Setting.Property scope; private 
static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); @@ -70,12 +69,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent { this.settingUpgraders = Collections.unmodifiableMap( - settingUpgraders - .stream() - .collect( - Collectors.toMap( - SettingUpgrader::getSetting, - u -> e -> new AbstractMap.SimpleEntry<>(u.getKey(e.getKey()), u.getValue(e.getValue()))))); + settingUpgraders.stream().collect(Collectors.toMap(SettingUpgrader::getSetting, Function.identity()))); + this.scope = scope; Map> complexMatchers = new HashMap<>(); @@ -786,15 +781,25 @@ public abstract class AbstractScopedSettings extends AbstractComponent { boolean changed = false; // track if any settings were upgraded for (final String key : settings.keySet()) { final Setting setting = getRaw(key); - final Function, Map.Entry> upgrader = settingUpgraders.get(setting); + final SettingUpgrader upgrader = settingUpgraders.get(setting); if (upgrader == null) { // the setting does not have an upgrader, copy the setting builder.copy(key, settings); } else { // the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic changed = true; - final Map.Entry upgrade = upgrader.apply(new Entry(key, settings)); - builder.put(upgrade.getKey(), upgrade.getValue()); + // noinspection ConstantConditions + if (setting.getConcreteSetting(key).isListSetting()) { + final List value = settings.getAsList(key); + final String upgradedKey = upgrader.getKey(key); + final List upgradedValue = upgrader.getListValue(value); + builder.putList(upgradedKey, upgradedValue); + } else { + final String value = settings.get(key); + final String upgradedKey = upgrader.getKey(key); + final String upgradedValue = upgrader.getValue(value); + builder.put(upgradedKey, upgradedValue); + } } } // we only return a new instance if there was an upgrade diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index cb369d6cfda..7e90aa3f442 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -443,6 +443,9 @@ public final class ClusterSettings extends AbstractScopedSettings { EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING ))); - public static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); + public static List> BUILT_IN_SETTING_UPGRADERS = Collections.unmodifiableList(Arrays.asList( + RemoteClusterAware.SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER, + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER, + RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER)); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 89bbe752a1f..5244cdd726d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -345,6 +345,11 @@ public class Setting implements ToXContentObject { return false; } + + final boolean isListSetting() { + return this instanceof ListSetting; + } + boolean hasComplexMatcher() { return isGroupSetting(); } @@ -453,7 +458,7 @@ public class Setting implements ToXContentObject { * @return the raw string representation of the setting 
value */ String innerGetRaw(final Settings settings) { - return settings.get(getKey(), defaultValue.apply(settings)); + return settings.get(getKey(), defaultValue.apply(settings), isListSetting()); } /** Logs a deprecation warning if the setting is deprecated and used. */ @@ -1305,7 +1310,6 @@ public class Setting implements ToXContentObject { } } } - } static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java index 91f2bead300..bc41b554905 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.settings; +import java.util.List; + /** * Represents the logic to upgrade a setting. * @@ -51,4 +53,8 @@ public interface SettingUpgrader { return value; } + default List getListValue(final List value) { + return value; + } + } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 2eb14f7ac65..1aeed2aee51 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -245,6 +245,30 @@ public final class Settings implements ToXContentFragment { return retVal == null ? defaultValue : retVal; } + /** + * Returns the setting value associated with the setting key. If it does not exist, + * returns the default value provided. + */ + String get(String setting, String defaultValue, boolean isList) { + Object value = settings.get(setting); + if (value != null) { + if (value instanceof List) { + if (isList == false) { + throw new IllegalArgumentException( + "Found list type value for setting [" + setting + "] but did not expect a list for it." + ); + } + } else if (isList) { + throw new IllegalArgumentException( + "Expected list type value for setting [" + setting + "] but found [" + value.getClass() + ']' + ); + } + return toString(value); + } else { + return defaultValue; + } + } + /** * Returns the setting value (as float) associated with the setting key. If it does not exists, * returns the default value provided. diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java index 5e087d3093b..c7345aa3b63 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; import java.util.concurrent.ExecutionException; @@ -30,8 +31,14 @@ import java.util.concurrent.TimeoutException; public class FutureUtils { + /** + * Cancel execution of this future without interrupting a running thread. See {@link Future#cancel(boolean)} for details.
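The upgraders registered in ClusterSettings above are referenced by name, but their bodies are not part of this diff. A plausible sketch of one of them, assuming a pure key rename from the search.remote namespace to cluster.remote that inherits the identity getValue/getListValue defaults shown here:

    // Assumption: a key-only rename; values pass through the default methods.
    public static final SettingUpgrader<List<String>> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER =
            new SettingUpgrader<List<String>>() {

                @Override
                public Setting<List<String>> getSetting() {
                    return SEARCH_REMOTE_CLUSTERS_SEEDS;
                }

                @Override
                public String getKey(final String key) {
                    // e.g. search.remote.foo.seeds -> cluster.remote.foo.seeds
                    return key.replaceFirst("^search", "cluster");
                }
            };

Because the seeds setting is a list setting, upgradeSettings takes the putList branch above, and the default getListValue carries the seed list over unchanged under the new key.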
+ * + * @param toCancel the future to cancel + * @return false if the future could not be cancelled, otherwise true + */ @SuppressForbidden(reason = "Future#cancel()") - public static boolean cancel(Future toCancel) { + public static boolean cancel(@Nullable final Future toCancel) { if (toCancel != null) { return toCancel.cancel(false); // this method is a forbidden API since it interrupts threads } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index f34798605d7..91f4e615159 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -131,7 +131,7 @@ public class DiscoveryModule { if (discoverySupplier == null) { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } - Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); + Loggers.getLogger(getClass(), settings).info("using discovery type [{}] and host providers {}", discoveryType, hostsProviderNames); discovery = Objects.requireNonNull(discoverySupplier.get()); } diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index 4e944a0f7fa..aece8131994 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -19,24 +19,46 @@ package org.elasticsearch.http; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import static org.elasticsearch.common.Booleans.parseBoolean; + public class HttpInfo implements Writeable, ToXContentFragment { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(HttpInfo.class)); + + /** Whether to add hostname to publish host field when serializing. 
*/ + private static final boolean CNAME_IN_PUBLISH_HOST = + parseBoolean(System.getProperty("es.http.cname_in_publish_address"), false); + private final BoundTransportAddress address; private final long maxContentLength; + private final boolean cnameInPublishHost; public HttpInfo(StreamInput in) throws IOException { - address = BoundTransportAddress.readBoundTransportAddress(in); - maxContentLength = in.readLong(); + this(BoundTransportAddress.readBoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST); + } + + public HttpInfo(BoundTransportAddress address, long maxContentLength) { + this(address, maxContentLength, CNAME_IN_PUBLISH_HOST); + } + + HttpInfo(BoundTransportAddress address, long maxContentLength, boolean cnameInPublishHost) { + this.address = address; + this.maxContentLength = maxContentLength; + this.cnameInPublishHost = cnameInPublishHost; } @Override @@ -45,11 +67,6 @@ public class HttpInfo implements Writeable, ToXContentFragment { out.writeLong(maxContentLength); } - public HttpInfo(BoundTransportAddress address, long maxContentLength) { - this.address = address; - this.maxContentLength = maxContentLength; - } - static final class Fields { static final String HTTP = "http"; static final String BOUND_ADDRESS = "bound_address"; @@ -62,7 +79,21 @@ public class HttpInfo implements Writeable, ToXContentFragment { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.HTTP); builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); - builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString()); + TransportAddress publishAddress = address.publishAddress(); + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if (InetAddresses.isInetAddress(hostString) == false) { + if (cnameInPublishHost) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } else { + DEPRECATION_LOGGER.deprecated( + "[http.publish_host] was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + + "Use -Des.http.cname_in_publish_address=true to enforce non-deprecated formatting." 
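For illustration, with an invented hostname that is not from this diff: if the publish host is configured by name rather than by IP, the two formats differ only in the CNAME prefix.

    // Hypothetical node whose publish host "node-1.example.com" resolves to 10.0.0.5:9200
    // with -Des.http.cname_in_publish_address=true:
    //   "publish_address": "node-1.example.com/10.0.0.5:9200"
    // default in 6.x (and the deprecation warning above is logged):
    //   "publish_address": "10.0.0.5:9200"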
+ ); + } + } + builder.field(Fields.PUBLISH_ADDRESS, publishAddressString); builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fe27aea805e..fc693113fee 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -661,7 +661,7 @@ public abstract class Engine implements Closeable { } /** get commits stats for the last commit */ - public CommitStats commitStats() { + public final CommitStats commitStats() { return new CommitStats(getLastCommittedSegmentInfos()); } @@ -678,12 +678,6 @@ public abstract class Engine implements Closeable { */ public abstract void waitForOpsToComplete(long seqNo) throws InterruptedException; - /** - * Reset the local checkpoint in the tracker to the given local checkpoint - * @param localCheckpoint the new checkpoint to be set - */ - public abstract void resetLocalCheckpoint(long localCheckpoint); - /** * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint */ @@ -951,7 +945,9 @@ public abstract class Engine implements Closeable { * * @return the commit Id for the resulting commit */ - public abstract CommitId flush() throws EngineException; + public final CommitId flush() throws EngineException { + return flush(false, false); + } /** @@ -1163,11 +1159,16 @@ public abstract class Engine implements Closeable { PRIMARY, REPLICA, PEER_RECOVERY, - LOCAL_TRANSLOG_RECOVERY; + LOCAL_TRANSLOG_RECOVERY, + LOCAL_RESET; public boolean isRecovery() { return this == PEER_RECOVERY || this == LOCAL_TRANSLOG_RECOVERY; } + + boolean isFromTranslog() { + return this == LOCAL_TRANSLOG_RECOVERY || this == LOCAL_RESET; + } } public Origin origin() { @@ -1593,7 +1594,7 @@ public abstract class Engine implements Closeable { private final CheckedRunnable onClose; private final IndexCommit indexCommit; - IndexCommitRef(IndexCommit indexCommit, CheckedRunnable onClose) { + public IndexCommitRef(IndexCommit indexCommit, CheckedRunnable onClose) { this.indexCommit = indexCommit; this.onClose = onClose; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineFactory.java b/server/src/main/java/org/elasticsearch/index/engine/EngineFactory.java index b477e27b6e1..e50bdd86e75 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineFactory.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.engine; /** * Simple Engine Factory */ +@FunctionalInterface public interface EngineFactory { Engine newReadWriteEngine(EngineConfig config); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d9b03777f1b..52dd4d3fcd0 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -152,12 +152,6 @@ public class InternalEngine extends Engine { private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; - /** - * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. 
IndexingMemoryController polls this - * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents - * being indexed/deleted. - */ - private final AtomicLong writingBytes = new AtomicLong(); private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); @Nullable @@ -530,7 +524,7 @@ public class InternalEngine extends Engine { /** Returns how many bytes we are currently moving from indexing buffer to segments on disk */ @Override public long getWritingBytes() { - return writingBytes.get(); + return indexWriter.getFlushingBytes() + versionMap.getRefreshingBytes(); } /** @@ -735,6 +729,7 @@ public class InternalEngine extends Engine { : "version: " + index.version() + " type: " + index.versionType(); return true; case LOCAL_TRANSLOG_RECOVERY: + case LOCAL_RESET: assert index.isRetry(); return true; // allow to optimize in order to update the max safe time stamp default: @@ -833,7 +828,7 @@ public class InternalEngine extends Engine { indexResult = new IndexResult( plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } - if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (index.origin().isFromTranslog() == false) { final Translog.Location location; if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); @@ -1173,7 +1168,7 @@ public class InternalEngine extends Engine { deleteResult = new DeleteResult( plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false); } - if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (delete.origin().isFromTranslog() == false) { final Translog.Location location; if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); @@ -1411,7 +1406,7 @@ public class InternalEngine extends Engine { } } final NoOpResult noOpResult = failure != null ? new NoOpResult(getPrimaryTerm(), noOp.seqNo(), failure) : new NoOpResult(getPrimaryTerm(), noOp.seqNo()); - if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (noOp.origin().isFromTranslog() == false) { final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); } @@ -1437,9 +1432,6 @@ public class InternalEngine extends Engine { // pass the new reader reference to the external reader manager. final long localCheckpointBeforeRefresh = getLocalCheckpoint(); - // this will also cause version map ram to be freed hence we always account for it. 
- final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh(); - writingBytes.addAndGet(bytes); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (store.tryIncRef()) { @@ -1465,8 +1457,6 @@ e.addSuppressed(inner); } throw new RefreshFailedEngineException(shardId, e); - } finally { - writingBytes.addAndGet(-bytes); } assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint(); @@ -1576,11 +1566,6 @@ || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } - @Override - public CommitId flush() throws EngineException { - return flush(false, false); - } - @Override public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { ensureOpen(); @@ -2340,11 +2325,6 @@ localCheckpointTracker.waitForOpsToComplete(seqNo); } - @Override - public void resetLocalCheckpoint(long localCheckpoint) { - localCheckpointTracker.resetCheckpoint(localCheckpoint); - } - @Override public SeqNoStats getSeqNoStats(long globalCheckpoint) { return localCheckpointTracker.getStats(globalCheckpoint); diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 18d3cedb37e..d0dd9466b60 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -434,6 +434,14 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta return maps.current.ramBytesUsed.get(); } + /** + * Returns how much RAM is currently being freed up by refreshing. This is {@link #ramBytesUsed()} + * except it does not include tombstones, because they do not clear on refresh. + */ + long getRefreshingBytes() { + return maps.old.ramBytesUsed.get(); + } + @Override public Collection getChildResources() { // TODO: useful to break down RAM usage here? diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java new file mode 100644 index 00000000000..b958bd84b76 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -0,0 +1,368 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.Lock; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStats; + +import java.io.Closeable; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Stream; + +/** + * A basic read-only engine that allows switching a shard to be truly read-only, temporarily or permanently. + * Note: this engine can be opened side-by-side with a read-write engine but will not reflect any changes made to the read-write + * engine. + * + * @see #ReadOnlyEngine(EngineConfig, SeqNoStats, TranslogStats, boolean, Function) + */ +public final class ReadOnlyEngine extends Engine { + + private final SegmentInfos lastCommittedSegmentInfos; + private final SeqNoStats seqNoStats; + private final TranslogStats translogStats; + private final SearcherManager searcherManager; + private final IndexCommit indexCommit; + private final Lock indexWriterLock; + + /** + * Creates a new ReadOnlyEngine. This ctor can also be used to open a read-only engine on top of an already opened + * read-write engine. It can optionally obtain the writer lock for the shard, which will time out if another + * engine is still open. + * + * @param config the engine configuration + * @param seqNoStats sequence number statistics for this engine or null if not provided + * @param translogStats translog stats for this engine or null if not provided + * @param obtainLock if true this engine will try to obtain the {@link IndexWriter#WRITE_LOCK_NAME} lock. Otherwise + * the lock won't be obtained + * @param readerWrapperFunction allows wrapping the index reader for this engine. + */ + public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats translogStats, boolean obtainLock, + Function readerWrapperFunction) { + super(config); + try { + Store store = config.getStore(); + store.incRef(); + DirectoryReader reader = null; + Directory directory = store.directory(); + Lock indexWriterLock = null; + boolean success = false; + try { + // we obtain the IW lock even though we never modify the index. + // yet this makes sure nobody else does, including some testing tools that try to be messy + indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; + this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); + this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; + this.seqNoStats = seqNoStats == null ?
buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; + reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), config.getShardId()); + if (config.getIndexSettings().isSoftDeleteEnabled()) { + reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); + } + reader = readerWrapperFunction.apply(reader); + this.indexCommit = reader.getIndexCommit(); + this.searcherManager = new SearcherManager(reader, + new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); + this.indexWriterLock = indexWriterLock; + success = true; + } finally { + if (success == false) { + IOUtils.close(reader, indexWriterLock, store::decRef); + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); // this is stupid + } + } + + @Override + protected void closeNoLock(String reason, CountDownLatch closedLatch) { + if (isClosed.compareAndSet(false, true)) { + try { + IOUtils.close(searcherManager, indexWriterLock, store::decRef); + } catch (Exception ex) { + logger.warn("failed to close searcher", ex); + } finally { + closedLatch.countDown(); + } + } + } + + public static SeqNoStats buildSeqNoStats(SegmentInfos infos) { + final SequenceNumbers.CommitInfo seqNoStats = + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(infos.userData.entrySet()); + long maxSeqNo = seqNoStats.maxSeqNo; + long localCheckpoint = seqNoStats.localCheckpoint; + return new SeqNoStats(maxSeqNo, localCheckpoint, localCheckpoint); + } + + @Override + public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { + return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); + } + + @Override + protected ReferenceManager getReferenceManager(SearcherScope scope) { + return searcherManager; + } + + @Override + protected SegmentInfos getLastCommittedSegmentInfos() { + return lastCommittedSegmentInfos; + } + + @Override + public String getHistoryUUID() { + return lastCommittedSegmentInfos.userData.get(Engine.HISTORY_UUID_KEY); + } + + @Override + public long getWritingBytes() { + return 0; + } + + @Override + public long getIndexThrottleTimeInMillis() { + return 0; + } + + @Override + public boolean isThrottled() { + return false; + } + + @Override + public IndexResult index(Index index) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("indexing is not supported on a read-only engine"); + } + + @Override + public DeleteResult delete(Delete delete) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("deletes are not supported on a read-only engine"); + } + + @Override + public NoOpResult noOp(NoOp noOp) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("no-ops are not supported on a read-only engine"); + } + + @Override + public boolean isTranslogSyncNeeded() { + return false; + } + + @Override + public boolean ensureTranslogSynced(Stream locations) { + return false; + } + + @Override + public void syncTranslog() { + } + + @Override + public Closeable acquireRetentionLockForPeerRecovery() { + return () -> {}; + } + + @Override + public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, long fromSeqNo, long toSeqNo, + boolean requiredFullRange) throws IOException { + return readHistoryOperations(source, mapperService, fromSeqNo); + } + + @Override + public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return new 
Translog.Snapshot() { + @Override + public void close() { } + @Override + public int totalOperations() { + return 0; + } + @Override + public Translog.Operation next() { + return null; + } + }; + } + + @Override + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return 0; + } + + @Override + public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return false; + } + + @Override + public TranslogStats getTranslogStats() { + return translogStats; + } + + @Override + public Translog.Location getTranslogLastWriteLocation() { + return new Translog.Location(0,0,0); + } + + @Override + public long getLocalCheckpoint() { + return seqNoStats.getLocalCheckpoint(); + } + + @Override + public void waitForOpsToComplete(long seqNo) { + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return seqNoStats.getGlobalCheckpoint(); + } + + @Override + public long getIndexBufferRAMBytesUsed() { + return 0; + } + + @Override + public List segments(boolean verbose) { + return Arrays.asList(getSegmentInfo(lastCommittedSegmentInfos, verbose)); + } + + @Override + public void refresh(String source) { + // we could allow refreshes if we want down the road the searcher manager will then reflect changes to a rw-engine + // opened side-by-side + } + + @Override + public void writeIndexingBuffer() throws EngineException { + } + + @Override + public boolean shouldPeriodicallyFlush() { + return false; + } + + @Override + public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) { + // we can't do synced flushes this would require an indexWriter which we don't have + throw new UnsupportedOperationException("syncedFlush is not supported on a read-only engine"); + } + + @Override + public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { + return new CommitId(lastCommittedSegmentInfos.getId()); + } + + @Override + public void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, + boolean upgrade, boolean upgradeOnlyAncientSegments) { + } + + @Override + public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) { + store.incRef(); + return new IndexCommitRef(indexCommit, store::decRef); + } + + @Override + public IndexCommitRef acquireSafeIndexCommit() { + return acquireLastIndexCommit(false); + } + + @Override + public void activateThrottling() { + } + + @Override + public void deactivateThrottling() { + } + + @Override + public void trimUnreferencedTranslogFiles() { + } + + @Override + public boolean shouldRollTranslogGeneration() { + return false; + } + + @Override + public void rollTranslogGeneration() { + } + + @Override + public void restoreLocalCheckpointFromTranslog() { + } + + @Override + public int fillSeqNoGaps(long primaryTerm) { + return 0; + } + + @Override + public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) { + return this; + } + + @Override + public void skipTranslogRecovery() { + } + + @Override + public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) { + } + + @Override + public void maybePruneDeletes() { + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java 
b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 4a3fa852e7f..82a601de05e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -111,17 +111,6 @@ public abstract class MappedFieldType extends FieldType { public boolean equals(Object o) { if (!super.equals(o)) return false; MappedFieldType fieldType = (MappedFieldType) o; - // check similarity first because we need to check the name, and it might be null - // TODO: SimilarityProvider should have equals? - if (similarity == null || fieldType.similarity == null) { - if (similarity != fieldType.similarity) { - return false; - } - } else { - if (Objects.equals(similarity.name(), fieldType.similarity.name()) == false) { - return false; - } - } return boost == fieldType.boost && docValues == fieldType.docValues && @@ -131,7 +120,8 @@ public abstract class MappedFieldType extends FieldType { Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && Objects.equals(eagerGlobalOrdinals, fieldType.eagerGlobalOrdinals) && Objects.equals(nullValue, fieldType.nullValue) && - Objects.equals(nullValueAsString, fieldType.nullValueAsString); + Objects.equals(nullValueAsString, fieldType.nullValueAsString) && + Objects.equals(similarity, fieldType.similarity); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java index c64f1b1e403..6cfe7d177da 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.function.ScoreFunction; import org.elasticsearch.common.lucene.search.function.WeightFactorFunction; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryShardContext; @@ -46,7 +45,7 @@ public abstract class ScoreFunctionBuilder> * Read from a stream. */ public ScoreFunctionBuilder(StreamInput in) throws IOException { - weight = in.readOptionalFloat(); + weight = checkWeight(in.readOptionalFloat()); } @Override @@ -70,10 +69,17 @@ public abstract class ScoreFunctionBuilder> */ @SuppressWarnings("unchecked") public final FB setWeight(float weight) { - this.weight = weight; + this.weight = checkWeight(weight); return (FB) this; } + private Float checkWeight(Float weight) { + if (weight != null && Float.compare(weight, 0) < 0) { + throw new IllegalArgumentException("[weight] cannot be negative for a filtering function"); + } + return weight; + } + /** * The weight applied to the function before combining. 
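A quick usage sketch of the new guard, using RandomScoreFunctionBuilder (any ScoreFunctionBuilder subclass would behave the same): negative weights now fail fast at construction time instead of surfacing later during scoring.

    RandomScoreFunctionBuilder function = new RandomScoreFunctionBuilder();
    function.setWeight(0.5f);  // accepted
    // throws IllegalArgumentException: [weight] cannot be negative for a filtering function
    function.setWeight(-1.0f);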
*/ diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index cd33c1bf046..9fad96940b8 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -109,6 +109,7 @@ public class LocalCheckpointTracker { * @param checkpoint the local checkpoint to reset this tracker to */ public synchronized void resetCheckpoint(final long checkpoint) { + // TODO: remove this method once we restore the local history on promotion. assert checkpoint != SequenceNumbers.UNASSIGNED_SEQ_NO; assert checkpoint <= this.checkpoint; processedSeqNo.clear(); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java index 9c1795d654c..c711fb42936 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java @@ -91,5 +91,4 @@ public class SeqNoStats implements ToXContentFragment, Writeable { ", globalCheckpoint=" + globalCheckpoint + '}'; } - } diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index c56b0d740e7..c967e94f7da 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -51,6 +51,4 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent public String nodeName() { return indexSettings.getNodeName(); } - - } diff --git a/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java index e279badec4a..224d5be17e1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java +++ b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java @@ -21,13 +21,19 @@ package org.elasticsearch.index.shard; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.FutureUtils; import java.io.Closeable; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; +import java.util.LinkedHashMap; +import java.util.Map; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -45,38 +51,43 @@ public class GlobalCheckpointListeners implements Closeable { public interface GlobalCheckpointListener { /** * Callback when the global checkpoint is updated or the shard is closed. If the shard is closed, the value of the global checkpoint - * will be set to {@link org.elasticsearch.index.seqno.SequenceNumbers#UNASSIGNED_SEQ_NO} and the exception will be non-null. If the - * global checkpoint is updated, the exception will be null.
+ * will be set to {@link org.elasticsearch.index.seqno.SequenceNumbers#UNASSIGNED_SEQ_NO} and the exception will be non-null and an + * instance of {@link IndexShardClosedException}. If the listener timed out waiting for notification then the exception will be + * non-null and an instance of {@link TimeoutException}. If the global checkpoint is updated, the exception will be null. * * @param globalCheckpoint the updated global checkpoint - * @param e if non-null, the shard is closed + * @param e if non-null, the shard is closed or the listener timed out */ - void accept(long globalCheckpoint, IndexShardClosedException e); + void accept(long globalCheckpoint, Exception e); } // guarded by this private boolean closed; - private volatile List listeners; + private Map> listeners; private long lastKnownGlobalCheckpoint = UNASSIGNED_SEQ_NO; private final ShardId shardId; private final Executor executor; + private final ScheduledExecutorService scheduler; private final Logger logger; /** * Construct a global checkpoint listeners collection. * - * @param shardId the shard ID on which global checkpoint updates can be listened to - * @param executor the executor for listener notifications - * @param logger a shard-level logger + * @param shardId the shard ID on which global checkpoint updates can be listened to + * @param executor the executor for listener notifications + * @param scheduler the executor used for scheduling timeouts + * @param logger a shard-level logger */ GlobalCheckpointListeners( final ShardId shardId, final Executor executor, + final ScheduledExecutorService scheduler, final Logger logger) { - this.shardId = Objects.requireNonNull(shardId); - this.executor = Objects.requireNonNull(executor); - this.logger = Objects.requireNonNull(logger); + this.shardId = Objects.requireNonNull(shardId, "shardId"); + this.executor = Objects.requireNonNull(executor, "executor"); + this.scheduler = Objects.requireNonNull(scheduler, "scheduler"); + this.logger = Objects.requireNonNull(logger, "logger"); } /** @@ -84,12 +95,15 @@ public class GlobalCheckpointListeners implements Closeable { * listener will be asynchronously notified on the executor used to construct this collection of global checkpoint listeners. If the * shard is closed then the listener will be asynchronously notified on the executor used to construct this collection of global * checkpoint listeners. The listener will only be notified of at most one event, either the global checkpoint is updated or the shard - * is closed. A listener must re-register after one of these events to receive subsequent events. + * is closed. A listener must re-register after one of these events to receive subsequent events. Callers may add a timeout to be + * notified if the timeout elapses. In this case, the listener will be notified with a {@link TimeoutException}. Passing null for + * the timeout means no timeout will be associated with the listener.
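A hedged usage sketch of the timeout-aware listener, written against the public IndexShard#addGlobalCheckpointListener overload added later in this diff (shard and currentGlobalCheckpoint are assumed local variables):

    shard.addGlobalCheckpointListener(
            currentGlobalCheckpoint,
            (globalCheckpoint, e) -> {
                if (e == null) {
                    // the global checkpoint advanced past currentGlobalCheckpoint
                } else if (e instanceof TimeoutException) {
                    // the timeout elapsed before the global checkpoint advanced
                } else {
                    assert e instanceof IndexShardClosedException;
                    // the shard was closed before the global checkpoint advanced
                }
            },
            TimeValue.timeValueSeconds(30));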
* * @param currentGlobalCheckpoint the current global checkpoint known to the listener * @param listener the listener + * @param timeout the listener timeout, or null if no timeout */ - synchronized void add(final long currentGlobalCheckpoint, final GlobalCheckpointListener listener) { + synchronized void add(final long currentGlobalCheckpoint, final GlobalCheckpointListener listener, final TimeValue timeout) { if (closed) { executor.execute(() -> notifyListener(listener, UNASSIGNED_SEQ_NO, new IndexShardClosedException(shardId))); return; @@ -97,12 +111,43 @@ public class GlobalCheckpointListeners implements Closeable { if (lastKnownGlobalCheckpoint > currentGlobalCheckpoint) { // notify directly executor.execute(() -> notifyListener(listener, lastKnownGlobalCheckpoint, null)); - return; } else { if (listeners == null) { - listeners = new ArrayList<>(); + listeners = new LinkedHashMap<>(); + } + if (timeout == null) { + listeners.put(listener, null); + } else { + listeners.put( + listener, + scheduler.schedule( + () -> { + final boolean removed; + synchronized (this) { + /* + * Note that the listeners map can be null if a notification nulled out the map reference when + * notifying listeners, and then our scheduled execution occurred before we could be cancelled by + * the notification. In this case, we would have blocked waiting for access to this critical + * section. + * + * What is more, we know that this listener has a timeout associated with it (otherwise we would + * not be here) so the return value from remove being null is an indication that we are not in the + * map. This can happen if a notification nulled out the listeners, and then our scheduled execution + * occurred before we could be cancelled by the notification, and then another thread added a + * listener causing the listeners map reference to be non-null again. In this case, our listener + * here would not be in the map and we should not fire the timeout logic. + */ + removed = listeners != null && listeners.remove(listener) != null; + } + if (removed) { + final TimeoutException e = new TimeoutException(timeout.getStringRep()); + logger.trace("global checkpoint listener timed out", e); + executor.execute(() -> notifyListener(listener, UNASSIGNED_SEQ_NO, e)); + } + }, + timeout.nanos(), + TimeUnit.NANOSECONDS)); } - listeners.add(listener); } } @@ -112,10 +157,25 @@ public class GlobalCheckpointListeners implements Closeable { notifyListeners(UNASSIGNED_SEQ_NO, new IndexShardClosedException(shardId)); } + /** + * The number of listeners currently pending for notification. + * + * @return the number of listeners pending notification + */ synchronized int pendingListeners() { return listeners == null ? 0 : listeners.size(); } + /** + * The scheduled future for a listener that has a timeout associated with it, otherwise null. + * + * @param listener the listener to get the scheduled future for + * @return a scheduled future representing the timeout future for the listener, otherwise null + */ + synchronized ScheduledFuture getTimeoutFuture(final GlobalCheckpointListener listener) { + return listeners.get(listener); + } + /** * Invoke to notify all registered listeners of an updated global checkpoint. 
* @@ -135,19 +195,24 @@ public class GlobalCheckpointListeners implements Closeable { assert (globalCheckpoint == UNASSIGNED_SEQ_NO && e != null) || (globalCheckpoint >= NO_OPS_PERFORMED && e == null); if (listeners != null) { // capture the current listeners - final List currentListeners = listeners; + final Map> currentListeners = listeners; listeners = null; if (currentListeners != null) { executor.execute(() -> { - for (final GlobalCheckpointListener listener : currentListeners) { - notifyListener(listener, globalCheckpoint, e); + for (final Map.Entry> listener : currentListeners.entrySet()) { + /* + * We do not want to interrupt any timeouts that fired, these will detect that the listener has been notified and + * not trigger the timeout. + */ + FutureUtils.cancel(listener.getValue()); + notifyListener(listener.getKey(), globalCheckpoint, e); } }); } } } - private void notifyListener(final GlobalCheckpointListener listener, final long globalCheckpoint, final IndexShardClosedException e) { + private void notifyListener(final GlobalCheckpointListener listener, final long globalCheckpoint, final Exception e) { try { listener.accept(globalCheckpoint, e); } catch (final Exception caught) { @@ -157,8 +222,11 @@ public class GlobalCheckpointListeners implements Closeable { "error notifying global checkpoint listener of updated global checkpoint [{}]", globalCheckpoint), caught); - } else { + } else if (e instanceof IndexShardClosedException) { logger.warn("error notifying global checkpoint listener of closed shard", caught); + } else { + assert e instanceof TimeoutException : e; + logger.warn("error notifying global checkpoint listener of timeout", caught); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index bceb106aeef..91d87b00082 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -163,7 +163,6 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { @@ -303,7 +302,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); final String aId = shardRouting.allocationId().getId(); - this.globalCheckpointListeners = new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), logger); + this.globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), threadPool.scheduler(), logger); this.replicationTracker = new ReplicationTracker(shardId, aId, indexSettings, UNASSIGNED_SEQ_NO, globalCheckpointListeners::globalCheckpointUpdated); @@ -1273,16 +1273,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return result; } - // package-private for testing - int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOException { - 
recoveryState.getTranslog().totalOperations(snapshot.totalOperations()); - recoveryState.getTranslog().totalOperationsOnStart(snapshot.totalOperations()); + /** + * Replays translog operations from the provided translog {@code snapshot} to the current engine using the given {@code origin}. + * The callback {@code onOperationRecovered} is notified after each translog operation is replayed successfully. + */ + int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot, Engine.Operation.Origin origin, + Runnable onOperationRecovered) throws IOException { int opsRecovered = 0; Translog.Operation operation; while ((operation = snapshot.next()) != null) { try { logger.trace("[translog] recover op {}", operation); - Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY); + Engine.Result result = applyTranslogOperation(operation, origin); switch (result.getResultType()) { case FAILURE: throw result.getFailure(); @@ -1295,7 +1297,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } opsRecovered++; - recoveryState.getTranslog().incrementRecoveredOperations(); + onOperationRecovered.run(); } catch (Exception e) { if (ExceptionsHelper.status(e) == RestStatus.BAD_REQUEST) { // mainly for MapperParsingException and Failure to detect xcontent @@ -1313,8 +1315,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl * Operations from the translog will be replayed to bring lucene up to date. **/ public void openEngineAndRecoverFromTranslog() throws IOException { + final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog(); + final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> { + translogRecoveryStats.totalOperations(snapshot.totalOperations()); + translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations()); + return runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, + translogRecoveryStats::incrementRecoveredOperations); + }; innerOpenEngineAndTranslog(); - getEngine().recoverFromTranslog(this::runTranslogRecovery, Long.MAX_VALUE); + getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE); } /** @@ -1352,11 +1361,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); - - assertMaxUnsafeAutoIdInCommit(); - - final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); - store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + trimUnsafeCommits(); createNewEngine(config); verifyNotClosed(); @@ -1367,6 +1372,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } + private void trimUnsafeCommits() throws IOException { + assert currentEngineReference.get() == null : "engine is running"; + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = 
Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + assertMaxUnsafeAutoIdInCommit(); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, indexSettings.getIndexVersionCreated()); + } + private boolean assertSequenceNumbersInCommit() throws IOException { final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData(); assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contain a local checkpoint"; @@ -1463,7 +1477,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl if (origin == Engine.Operation.Origin.PRIMARY) { assert assertPrimaryMode(); } else { - assert origin == Engine.Operation.Origin.REPLICA; + assert origin == Engine.Operation.Origin.REPLICA || origin == Engine.Operation.Origin.LOCAL_RESET; assert assertReplicationTarget(); } if (writeAllowedStates.contains(state) == false) { @@ -1768,15 +1782,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl /** * Add a global checkpoint listener. If the global checkpoint is above the current global checkpoint known to the listener then the - * listener will fire immediately on the calling thread. + * listener will fire immediately on the calling thread. If the specified timeout elapses before the listener is notified, the listener + * will be notified with a {@link TimeoutException}. A caller may pass null to specify no timeout. * * @param currentGlobalCheckpoint the current global checkpoint known to the listener * @param listener the listener + * @param timeout the timeout */ public void addGlobalCheckpointListener( final long currentGlobalCheckpoint, - final GlobalCheckpointListeners.GlobalCheckpointListener listener) { - this.globalCheckpointListeners.add(currentGlobalCheckpoint, listener); + final GlobalCheckpointListeners.GlobalCheckpointListener listener, + final TimeValue timeout) { + this.globalCheckpointListeners.add(currentGlobalCheckpoint, listener, timeout); } /** @@ -2166,9 +2183,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private Engine createNewEngine(EngineConfig config) { synchronized (mutex) { - if (state == IndexShardState.CLOSED) { - throw new AlreadyClosedException(shardId + " can't create engine - shard is closed"); - } + verifyNotClosed(); assert this.currentEngineReference.get() == null; Engine engine = newEngine(config); onNewEngine(engine); // call this before we pass the memory barrier otherwise actions that happen @@ -2314,19 +2329,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl bumpPrimaryTerm(opPrimaryTerm, () -> { updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); final long currentGlobalCheckpoint = getGlobalCheckpoint(); - final long localCheckpoint; - if (currentGlobalCheckpoint == UNASSIGNED_SEQ_NO) { - localCheckpoint = NO_OPS_PERFORMED; + final long maxSeqNo = seqNoStats().getMaxSeqNo(); + logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", + opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); + if (currentGlobalCheckpoint < maxSeqNo) { + resetEngineToGlobalCheckpoint(); } else { - localCheckpoint = currentGlobalCheckpoint; + getEngine().rollTranslogGeneration(); } - logger.trace( - "detected new primary with primary term [{}], resetting local checkpoint from [{}] to [{}]", - opPrimaryTerm, - getLocalCheckpoint(), - localCheckpoint); - getEngine().resetLocalCheckpoint(localCheckpoint); - getEngine().rollTranslogGeneration(); }); } }
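The new transition logic is compact but load-bearing: if the replica's max_seq_no is ahead of the global checkpoint, it may hold operations the new primary never acknowledged, so the engine is rolled back to the safe commit and the translog replayed only up to the checkpoint; otherwise starting a fresh translog generation is enough. A simplified sketch of the rule, with stub names that are not the real IndexShard API:

// Simplified sketch of the primary-term transition rule; the interface is an illustrative stub.
final class PrimaryTermTransition {

    interface Shard {
        long globalCheckpoint();
        long maxSeqNo();
        void resetEngineToGlobalCheckpoint(); // roll back to safe commit, replay to the checkpoint
        void rollTranslogGeneration();        // no divergence possible, just start a new generation
    }

    static void onNewPrimaryTerm(final Shard shard) {
        if (shard.globalCheckpoint() < shard.maxSeqNo()) {
            // operations above the global checkpoint are unacknowledged and may diverge
            // from the new primary's history, so discard them and re-sync from the primary
            shard.resetEngineToGlobalCheckpoint();
        } else {
            shard.rollTranslogGeneration();
        }
    }
}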
to [{}]", - opPrimaryTerm, - getLocalCheckpoint(), - localCheckpoint); - getEngine().resetLocalCheckpoint(localCheckpoint); - getEngine().rollTranslogGeneration(); }); } } @@ -2687,4 +2697,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } }; } + + /** + * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint. + */ + void resetEngineToGlobalCheckpoint() throws IOException { + assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]"; + sync(); // persist the global checkpoint to disk + final long globalCheckpoint = getGlobalCheckpoint(); + final Engine newEngine; + synchronized (mutex) { + verifyNotClosed(); + IOUtils.close(currentEngineReference.getAndSet(null)); + trimUnsafeCommits(); + newEngine = createNewEngine(newEngineConfig()); + active.set(true); + } + final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery( + engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> { + // TODO: add a dedicate recovery stats for the reset translog + }); + newEngine.recoverFromTranslog(translogRunner, globalCheckpoint); + } } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java index fed15b30583..f5a870441d4 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; +import java.util.Objects; + /** * Wrapper around a {@link Similarity} and its name. */ @@ -48,4 +50,28 @@ public final class SimilarityProvider { return similarity; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SimilarityProvider that = (SimilarityProvider) o; + /** + * We check name only because the similarity is + * re-created for each new instance and they don't implement equals. + * This is not entirely correct though but we only use equality checks + * for similarities inside the same index and names are unique in this case. + **/ + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + /** + * We use name only because the similarity is + * re-created for each new instance and they don't implement equals. + * This is not entirely correct though but we only use equality checks + * for similarities a single index and names are unique in this case. 
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index b892c5c01fe..8e57caad3b4 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1439,11 +1439,28 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref */ public void bootstrapNewHistory() throws IOException { metadataLock.writeLock().lock(); - try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { - final Map<String, String> userData = getUserData(writer); + try { + Map<String, String> userData = readLastCommittedSegmentsInfo().getUserData(); final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); + bootstrapNewHistory(maxSeqNo); + } finally { + metadataLock.writeLock().unlock(); + } + } + + /** + * Marks an existing Lucene index with a new history UUID and sets the given maxSeqNo as the local checkpoint + * as well as the maximum sequence number. + * This is used to make sure no existing shard will recover from this index using ops-based recovery. + * @see SequenceNumbers#LOCAL_CHECKPOINT_KEY + * @see SequenceNumbers#MAX_SEQ_NO + */ + public void bootstrapNewHistory(long maxSeqNo) throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + final Map<String, String> map = new HashMap<>(); + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); + updateCommitData(writer, map); } finally { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 1c83a880511..e9f674e14a5 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -396,7 +396,6 @@ public class IndicesService extends AbstractLifecycleComponent public IndexService indexService(Index index) { return indices.get(index.getUUID()); } - /** * Returns an IndexService for the specified index if it exists, otherwise a {@link IndexNotFoundException} is thrown. */
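The Store change above works by rewriting the commit's user data in place: open an IndexWriter in APPEND mode, swap in a fresh history UUID, and pin both the max sequence number and the local checkpoint to the supplied value. A self-contained sketch of the same trick against a bare Lucene index; the keys, values, and path are illustrative only:

import java.io.IOException;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Sketch: rewrite the live commit user data of an existing index without touching documents.
public class CommitUserDataDemo {
    public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"))) {
            IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
                    .setOpenMode(IndexWriterConfig.OpenMode.APPEND); // index must already exist
            try (IndexWriter writer = new IndexWriter(dir, config)) {
                Map<String, String> userData = new HashMap<>();
                Iterable<Map.Entry<String, String>> live = writer.getLiveCommitData();
                if (live != null) {
                    live.forEach(e -> userData.put(e.getKey(), e.getValue())); // keep existing entries
                }
                userData.put("history_uuid", UUID.randomUUID().toString()); // illustrative keys
                userData.put("max_seq_no", "42");
                userData.put("local_checkpoint", "42");
                writer.setLiveCommitData(userData.entrySet());
                writer.commit(); // persists the new user data in the commit point
            }
        }
    }
}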
diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java new file mode 100644 index 00000000000..4e8e9b6c7f5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; + +import java.io.IOException; +import java.util.List; + +public class FilterRepository implements Repository { + + private final Repository in; + + public FilterRepository(Repository in) { + this.in = in; + } + + @Override + public RepositoryMetaData getMetadata() { + return in.getMetadata(); + } + + @Override + public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { + return in.getSnapshotInfo(snapshotId); + } + + @Override + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + return in.getSnapshotGlobalMetaData(snapshotId); + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { + return in.getSnapshotIndexMetaData(snapshotId, index); + } + + @Override + public RepositoryData getRepositoryData() { + return in.getRepositoryData(); + } + + @Override + public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) { + in.initializeSnapshot(snapshotId, indices, metaData); + } + + @Override + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure, int totalShards, + List<SnapshotShardFailure> shardFailures, long repositoryStateId, boolean includeGlobalState) { + return in.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId, + includeGlobalState); + } + + @Override + public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { + in.deleteSnapshot(snapshotId, repositoryStateId); + } + + @Override + public long getSnapshotThrottleTimeInNanos() { + return in.getSnapshotThrottleTimeInNanos(); + } + + @Override + public long getRestoreThrottleTimeInNanos() { + return in.getRestoreThrottleTimeInNanos(); + } + + @Override + public String startVerification() { + return in.startVerification(); + } + + @Override + public void endVerification(String verificationToken) { + in.endVerification(verificationToken); + } + + @Override + public void verify(String verificationToken, DiscoveryNode localNode) { + in.verify(verificationToken, localNode); + } + + @Override + public boolean isReadOnly() { + return in.isReadOnly(); + } + + @Override + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + in.snapshotShard(shard, store, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + } + + @Override + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { + in.restoreShard(shard, snapshotId, version,
indexId, snapshotShardId, recoveryState); + } + + @Override + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + return in.getShardSnapshotStatus(snapshotId, version, indexId, shardId); + } + + @Override + public Lifecycle.State lifecycleState() { + return in.lifecycleState(); + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + in.addLifecycleListener(listener); + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + in.removeLifecycleListener(listener); + } + + @Override + public void start() { + in.start(); + } + + @Override + public void stop() { + in.stop(); + } + + @Override + public void close() { + in.close(); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index c6cbaa50cdf..aef4381cd8b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -398,7 +398,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta "repository type [" + repositoryMetaData.type() + "] does not exist"); } try { - Repository repository = factory.create(repositoryMetaData); + Repository repository = factory.create(repositoryMetaData, typesRegistry::get); repository.start(); return repository; } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index c0b45259f99..9f16d26ac75 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -35,6 +36,7 @@ import org.elasticsearch.snapshots.SnapshotShardFailure; import java.io.IOException; import java.util.List; +import java.util.function.Function; /** * An interface for interacting with a repository in snapshot and restore. @@ -46,7 +48,7 @@ import java.util.List; *

@@ -63,6 +65,10 @@ public interface Repository extends LifecycleComponent { * @param metadata metadata for the repository including name and settings */ Repository create(RepositoryMetaData metadata) throws Exception; + + default Repository create(RepositoryMetaData metaData, Function<String, Repository.Factory> typeLookup) throws Exception { + return create(metaData); + } } /** @@ -188,14 +194,15 @@ public interface Repository extends LifecycleComponent { *

* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * * @param shard shard to be snapshotted + * @param store store to be snapshotted * @param snapshotId snapshot id * @param indexId id for the index being snapshotted * @param snapshotIndexCommit commit point * @param snapshotStatus snapshot status */ - void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus); + void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus); /** * Restores snapshot of the shard. diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 4c36cc5eed8..df80dd473f1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -845,8 +845,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } @Override - public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { - SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + SnapshotContext snapshotContext = new SnapshotContext(store, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); } catch (Exception e) { @@ -854,7 +855,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (e instanceof IndexShardSnapshotFailedException) { throw (IndexShardSnapshotFailedException) e; } else { - throw new IndexShardSnapshotFailedException(shard.shardId(), e); + throw new IndexShardSnapshotFailedException(store.shardId(), e); } } } @@ -1157,15 +1158,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * Constructs new context * - * @param shard shard to be snapshotted + * @param store store to be snapshotted * @param snapshotId snapshot id * @param indexId the id of the index being snapshotted * @param snapshotStatus snapshot status to report progress */ - SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) { - super(snapshotId, Version.CURRENT, indexId, shard.shardId()); + SnapshotContext(Store store, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) { + super(snapshotId, Version.CURRENT, indexId, store.shardId()); this.snapshotStatus = snapshotStatus; - this.store = shard.store(); + this.store = store; this.startTime = startTime; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 3ef3064697a..97e5b70f9da 100644 --- 
a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -46,6 +46,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; /** * Query sub phase which pulls data from doc values @@ -77,6 +78,15 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { hits = hits.clone(); // don't modify the incoming hits Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + List<String> noFormatFields = context.docValueFieldsContext().fields().stream().filter(f -> f.format == null).map(f -> f.field) + .collect(Collectors.toList()); + if (noFormatFields.isEmpty() == false) { + DEPRECATION_LOGGER.deprecated("There are doc-value fields which are not using a format. The output will " + + "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " + + "[format={}] with a doc value field in order to opt in for the future behaviour and ease the migration to " + + "7.0: {}", DocValueFieldsContext.USE_DEFAULT_FORMAT, noFormatFields); + } + for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { String field = fieldAndFormat.field; MappedFieldType fieldType = context.mapperService().fullName(field);
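The hunk above replaces a per-field deprecation warning with a single message naming every offending field, which keeps the deprecation log from flooding when many doc-value fields omit a format. A minimal sketch of the collect-then-warn-once pattern; the field type and logger are stand-ins, not the real FieldAndFormat or DeprecationLogger:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Stand-in for the pattern above: collect every offender first, then warn once for the lot.
public class WarnOnceDemo {

    static final class FieldAndFormat {
        final String field;
        final String format; // null means "no format given"
        FieldAndFormat(String field, String format) {
            this.field = field;
            this.format = format;
        }
    }

    public static void main(String[] args) {
        List<FieldAndFormat> fields = Arrays.asList(
                new FieldAndFormat("timestamp", null),
                new FieldAndFormat("price", "0.00"),
                new FieldAndFormat("count", null));

        List<String> noFormatFields = fields.stream()
                .filter(f -> f.format == null)
                .map(f -> f.field)
                .collect(Collectors.toList());

        if (noFormatFields.isEmpty() == false) {
            // one deprecation line naming all fields, instead of one line per field
            System.out.println("deprecation: doc-value fields without a format: " + noFormatFields);
        }
    }
}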
@@ -84,10 +94,6 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { final IndexFieldData<?> indexFieldData = context.getForField(fieldType); final DocValueFormat format; if (fieldAndFormat.format == null) { - DEPRECATION_LOGGER.deprecated("Doc-value field [" + fieldAndFormat.field + "] is not using a format. The output will " + - "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " + - "[format={}] with the doc value field in order to opt in for the future behaviour and ease the migration to " + - "7.0.", DocValueFieldsContext.USE_DEFAULT_FORMAT); format = null; } else { String formatDesc = fieldAndFormat.format; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 33b4d852987..88612dbcc50 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -389,7 +389,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements try { // we flush first to make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); + repository.snapshotShard(indexShard, indexShard.store(), snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), + snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index f08ef75612f..f75d01a0233 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -51,6 +52,11 @@ import java.util.stream.Stream; */ public abstract class RemoteClusterAware extends AbstractComponent { + static { + // remove search.remote.* settings in 8.0.0 + assert Version.CURRENT.major < 8; + } + public static final Setting.AffixSetting<List<String>> SEARCH_REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting( "search.remote.", @@ -66,6 +72,20 @@ public abstract class RemoteClusterAware extends AbstractComponent { Setting.Property.Dynamic, Setting.Property.NodeScope)); + public static final SettingUpgrader<List<String>> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER = new SettingUpgrader<List<String>>() { + + @Override + public Setting<List<String>> getSetting() { + return SEARCH_REMOTE_CLUSTERS_SEEDS; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; + /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ @@ -105,6 +125,20 @@ public abstract class RemoteClusterAware extends AbstractComponent { Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); + public static final SettingUpgrader<String> SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER = new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return SEARCH_REMOTE_CLUSTERS_PROXY; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; +
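Each upgrader pairs a deprecated setting with a key rewrite; upgrading a settings blob is then a mechanical rename, plus an optional value transform as the list-setting test later in this diff shows. A condensed sketch of the idea with a trimmed-down interface, not the real SettingUpgrader:

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

// Condensed sketch: rename deprecated keys while leaving everything else untouched.
public class SettingsUpgradeDemo {

    static Map<String, String> upgrade(Map<String, String> settings,
                                       String deprecatedPrefix,
                                       UnaryOperator<String> keyRewrite) {
        Map<String, String> upgraded = new HashMap<>();
        settings.forEach((key, value) ->
                upgraded.put(key.startsWith(deprecatedPrefix) ? keyRewrite.apply(key) : key, value));
        return upgraded;
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("search.remote.foo.seeds", "localhost:9300");
        settings.put("cluster.name", "demo");

        Map<String, String> upgraded =
                upgrade(settings, "search.remote.", key -> key.replaceFirst("^search", "cluster"));

        System.out.println(upgraded); // {cluster.remote.foo.seeds=localhost:9300, cluster.name=demo}
    }
}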
/** * A proxy address for the remote cluster. * NOTE: this setting is undocumented until we have at least one transport that supports passing diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 0e8bd5cb28d..75891ef820c 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -19,8 +19,6 @@ package org.elasticsearch.transport; -import java.util.Collection; -import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -35,6 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; @@ -43,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -55,6 +55,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -65,6 +66,11 @@ import static org.elasticsearch.common.settings.Setting.boolSetting; */ public final class RemoteClusterService extends RemoteClusterAware implements Closeable { + static { + // remove search.remote.* settings in 8.0.0 + assert Version.CURRENT.major < 8; + } + public static final Setting<Integer> SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); @@ -132,6 +138,20 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); + public static final SettingUpgrader<Boolean> SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER = new SettingUpgrader<Boolean>() { + + @Override + public Setting<Boolean> getSetting() { + return SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; + public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java index bd43182f007..ce60b14b3ef 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -47,6 +47,7 @@ import java.util.Collections; import java.util.HashSet; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.equalTo; public class TransportResizeActionTests extends ESTestCase { @@ -92,6 +93,16 @@ public class
TransportResizeActionTests extends ESTestCase { ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards ")); + IllegalArgumentException softDeletesError = expectThrows(IllegalArgumentException.class, () -> { + ResizeRequest req = new ResizeRequest("target", "source"); + req.getTargetIndexRequest().settings(Settings.builder().put("index.soft_deletes.enabled", false)); + ClusterState clusterState = createClusterState("source", 8, 1, + Settings.builder().put("index.blocks.write", true).put("index.soft_deletes.enabled", true).build()); + TransportResizeAction.prepareCreateIndexRequest(req, clusterState, + (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), "source", "target"); + }); + assertThat(softDeletesError.getMessage(), equalTo("Can't disable [index.soft_deletes.enabled] setting on resize")); + // create one that won't fail ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0, Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index b17a0cc5418..1a0e964ef77 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -53,7 +53,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Set; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptySet; @@ -113,6 +112,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) // manual collection or upon cluster forming. 
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") @@ -121,8 +121,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(TestPlugin.class, - MockTransportService.TestPlugin.class); + return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class); } public void testClusterInfoServiceCollectsInformation() throws Exception { @@ -172,7 +171,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { } } - public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { + public void testClusterInfoServiceInformationClearOnError() { internalCluster().startNodes(2, // manually control publishing Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 24f5a696561..abb34f80eac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -261,6 +261,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { .put("index.version.upgraded", upgraded) .put("index.similarity.default.type", "BM25") .put("index.analysis.analyzer.default.tokenizer", "keyword") + .put("index.soft_deletes.enabled", "true") .build(); runPrepareResizeIndexSettingsTest( indexSettings, @@ -277,6 +278,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); + assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true")); }); } @@ -337,6 +339,15 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { } + public void testDoNotOverrideSoftDeletesSettingOnResize() { + runPrepareResizeIndexSettingsTest( + Settings.builder().put("index.soft_deletes.enabled", "false").build(), + Settings.builder().put("index.soft_deletes.enabled", "true").build(), + Collections.emptyList(), + randomBoolean(), + settings -> assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true"))); + } + private void runPrepareResizeIndexSettingsTest( final Settings sourceSettings, final Settings requestSettings, diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 0ee1d2e9c4a..6766316fafd 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -47,6 +47,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -1171,4 +1172,47 @@ public class ScopedSettingsTests extends ESTestCase { } } + public void
testUpgradeListSetting() { + final Setting<List<String>> oldSetting = + Setting.listSetting("foo.old", Collections.emptyList(), Function.identity(), Property.NodeScope); + final Setting<List<String>> newSetting = + Setting.listSetting("foo.new", Collections.emptyList(), Function.identity(), Property.NodeScope); + + final AbstractScopedSettings service = + new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(oldSetting, newSetting)), + Collections.singleton(new SettingUpgrader<List<String>>() { + + @Override + public Setting<List<String>> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return "foo.new"; + } + + @Override + public List<String> getListValue(final List<String> value) { + return value.stream().map(s -> "new." + s).collect(Collectors.toList()); + } + })); + + final int length = randomIntBetween(0, 16); + final List<String> values = length == 0 ? Collections.emptyList() : new ArrayList<>(length); + for (int i = 0; i < length; i++) { + values.add(randomAlphaOfLength(8)); + } + + final Settings settings = Settings.builder().putList("foo.old", values).build(); + final Settings upgradedSettings = service.upgradeSettings(settings); + assertFalse(oldSetting.exists(upgradedSettings)); + assertTrue(newSetting.exists(upgradedSettings)); + assertThat( + newSetting.get(upgradedSettings), + equalTo(oldSetting.get(settings).stream().map(s -> "new." + s).collect(Collectors.toList()))); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index b13988b7050..30cfee81ddd 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -180,6 +180,13 @@ public class SettingTests extends ESTestCase { } } + public void testValidateStringSetting() { + Settings settings = Settings.builder().putList("foo.bar", Arrays.asList("bla-a", "bla-b")).build(); + Setting<String> stringSetting = Setting.simpleString("foo.bar", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stringSetting.get(settings)); + assertEquals("Found list type value for setting [foo.bar] but but did not expect a list for it.", e.getMessage()); + } + private static final Setting<String> FOO_BAR_SETTING = new Setting<>( "foo.bar", "foobar", diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java index 839b96e6418..99161f842b7 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.RemoteClusterService; import org.junit.After; import java.util.Arrays; @@ -122,4 +123,37 @@ public class UpgradeSettingsIT extends ESSingleNodeTestCase { assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metaData())), equalTo("new."
+ value)); } + public void testUpgradeRemoteClusterSettings() { + final boolean skipUnavailable = randomBoolean(); + client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put("search.remote.foo.skip_unavailable", skipUnavailable) + .putList("search.remote.foo.seeds", Collections.singletonList("localhost:9200")) + .put("search.remote.foo.proxy", "localhost:9200") + .build()) + .get(); + + final ClusterStateResponse response = client().admin().cluster().prepareState().clear().setMetaData(true).get(); + + final Settings settings = response.getState().metaData().persistentSettings(); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings), + equalTo(skipUnavailable)); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").get(settings), equalTo("localhost:9200")); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/FutureUtilsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/FutureUtilsTests.java new file mode 100644 index 00000000000..fb1265dd4d2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/FutureUtilsTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.Future; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class FutureUtilsTests extends ESTestCase { + + public void testCancellingNullFutureOkay() { + FutureUtils.cancel(null); + } + + public void testRunningFutureNotInterrupted() { + final Future<?> future = mock(Future.class); + FutureUtils.cancel(future); + verify(future).cancel(false); + } + +} \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index ac2f2b0d4f3..c0b01eb5ec5 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -111,6 +111,7 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase { super.beforeIndexDeletion(); internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); assertSeqNos(); + assertSameDocIdsOnShards(); } }
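The FutureUtilsTests above pin down the two properties the listener code relies on: cancelling a null future is a no-op, and cancellation never interrupts a task that is already running. A sketch of a helper with exactly those semantics, under a hypothetical name so as not to claim the real FutureUtils implementation:

import java.util.concurrent.Future;

// Hypothetical helper mirroring what the tests assert: null-safe, non-interrupting cancel.
final class Futures {

    private Futures() {}

    /**
     * Cancels the given future if non-null, without interrupting a task that is already running.
     * Returns true if the future was cancelled before it could complete.
     */
    static boolean cancel(final Future<?> future) {
        if (future != null) {
            return future.cancel(false); // never interrupt an in-flight task
        }
        return false;
    }
}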
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java new file mode 100644 index 00000000000..429950bf853 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.LIMIT_LOCAL_PORTS_COUNT; +import static org.elasticsearch.transport.TcpTransport.PORT; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +public class SettingsBasedHostProviderIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + + // super.nodeSettings enables file-based discovery, but here we disable it again so we can test the static list: + if (randomBoolean()) { + builder.putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey()); + } else { + builder.remove(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey()); + } + + // super.nodeSettings sets this to an empty list, which disables any search for other nodes, but here we want this to happen: + builder.remove(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()); + + return builder.build(); + } + + public void testClusterFormsWithSingleSeedHostInSettings() { + final String seedNodeName = internalCluster().startNode(); + final NodesInfoResponse nodesInfoResponse + = client(seedNodeName).admin().cluster().nodesInfo(new NodesInfoRequest("_local")).actionGet(); + final String seedNodeAddress = nodesInfoResponse.getNodes().get(0).getTransport().getAddress().publishAddress().toString(); + logger.info("--> using seed node address {}", seedNodeAddress); + + int extraNodes = randomIntBetween(1, 5); + internalCluster().startNodes(extraNodes, + Settings.builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), seedNodeAddress).build()); + + ensureStableCluster(extraNodes + 1); + } + + public void testClusterFormsByScanningPorts() { + // This test will fail if all 4 ports just below the one used by the first node are already bound by something else. It's hard + // to know how often this might happen in reality, so let's try it and see. + + final String seedNodeName = internalCluster().startNode(); + final NodesInfoResponse nodesInfoResponse + = client(seedNodeName).admin().cluster().nodesInfo(new NodesInfoRequest("_local")).actionGet(); + final int seedNodePort = nodesInfoResponse.getNodes().get(0).getTransport().getAddress().publishAddress().getPort(); + final int minPort = randomIntBetween(seedNodePort - LIMIT_LOCAL_PORTS_COUNT + 1, seedNodePort - 1); + final String portSpec = minPort + "-" + seedNodePort; + + logger.info("--> using port specification [{}]", portSpec); + internalCluster().startNode(Settings.builder().put(PORT.getKey(), portSpec)); + ensureStableCluster(2); + } +} diff --git a/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java new file mode 100644 index 00000000000..db149bd6d0d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Map; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; + +public class HttpInfoTests extends ESTestCase { + + public void testCorrectlyDisplayPublishedCname() throws Exception { + InetAddress localhost = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, true + ), "localhost/" + NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testHideCnameIfDeprecatedFormat() throws Exception { + InetAddress localhost = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, false + ), NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testCorrectDisplayPublishedIp() throws Exception { + InetAddress localhost = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("localhost"))); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, true + ), NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testCorrectDisplayPublishedIpv6() throws Exception { + int port = 9200; + TransportAddress localhost = + new TransportAddress(InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("0:0:0:0:0:0:0:1"))), port); + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress(new TransportAddress[]{localhost}, localhost), 0L, true + ), localhost.toString() + ); + } + + @SuppressWarnings("unchecked") + private void assertPublishAddress(HttpInfo httpInfo, String expected) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + httpInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertEquals( + expected, + ((Map<String, Object>) createParser(builder).map().get(HttpInfo.Fields.HTTP)) + .get(HttpInfo.Fields.PUBLISH_ADDRESS) + ); + } }
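The tests above fix the expected wire format: with cname publishing enabled the address renders as hostname/ip:port, and it collapses to plain ip:port when no distinct hostname is known or the deprecated format is requested. A small standalone sketch of that rendering rule, not the HttpInfo implementation itself:

import java.net.InetAddress;
import java.net.UnknownHostException;

// Standalone sketch of the publish-address rendering rule exercised by the tests above.
public class PublishAddressFormatDemo {

    static String format(InetAddress address, int port, boolean cnameInPublishAddress) {
        String ip = address.getHostAddress();
        String hostname = address.getHostName();
        if (cnameInPublishAddress && hostname.equals(ip) == false) {
            return hostname + '/' + ip + ':' + port; // e.g. localhost/127.0.0.1:9200
        }
        return ip + ':' + port; // deprecated format, or no distinct cname known
    }

    public static void main(String[] args) throws UnknownHostException {
        InetAddress localhost = InetAddress.getByName("localhost");
        System.out.println(format(localhost, 9200, true));  // localhost/127.0.0.1:9200
        System.out.println(format(localhost, 9200, false)); // 127.0.0.1:9200
    }
}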
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a26fd72468b..8f9d90154f8 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4087,7 +4087,7 @@ public class InternalEngineTests extends EngineTestCase { final long currentLocalCheckpoint = actualEngine.getLocalCheckpoint(); final long resetLocalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(currentLocalCheckpoint)); - actualEngine.resetLocalCheckpoint(resetLocalCheckpoint); + actualEngine.getLocalCheckpointTracker().resetCheckpoint(resetLocalCheckpoint); completedSeqNos.clear(); actualEngine.restoreLocalCheckpointFromTranslog(); final Set<Long> intersection = new HashSet<>(expectedCompletedSeqNos); @@ -5033,7 +5033,7 @@ public class InternalEngineTests extends EngineTestCase { expectThrows(AlreadyClosedException.class, () -> engine.acquireSearcher("test")); } - private static void trimUnsafeCommits(EngineConfig config) throws IOException { + static void trimUnsafeCommits(EngineConfig config) throws IOException { final Store store = config.getStore(); final TranslogConfig translogConfig = config.getTranslogConfig(); final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 286e85cef3f..115785b2e7b 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.nullValue; public class LiveVersionMapTests extends ESTestCase { @@ -91,6 +92,19 @@ public class LiveVersionMapTests extends ESTestCase { assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, tolerance); } + public void testRefreshingBytes() throws IOException { + LiveVersionMap map = new LiveVersionMap(); + BytesRefBuilder uid = new BytesRefBuilder(); + uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); + try (Releasable r = map.acquireLock(uid.toBytesRef())) { + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); + } + map.beforeRefresh(); + assertThat(map.getRefreshingBytes(), greaterThan(0L)); + map.afterRefresh(true); + assertThat(map.getRefreshingBytes(), equalTo(0L)); + } + private BytesRef uid(String string) { BytesRefBuilder builder = new BytesRefBuilder(); builder.copyChars(string); diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java new file mode 100644 index 00000000000..4080dd33d53 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.store.Store; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; + +public class ReadOnlyEngineTests extends EngineTestCase { + + public void testReadOnlyEngine() throws Exception { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + int numDocs = scaledRandomIntBetween(10, 1000); + final SeqNoStats lastSeqNoStats; + final List lastDocIds; + try (InternalEngine engine = createEngine(config)) { + Engine.Get get = null; + for (int i = 0; i < numDocs; i++) { + if (rarely()) { + continue; // gap in sequence number + } + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false)); + if (get == null || rarely()) { + get = newGet(randomBoolean(), doc); + } + if (rarely()) { + engine.flush(); + } + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + } + engine.syncTranslog(); + engine.flush(); + readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), + engine.getTranslogStats(), false, Function.identity()); + lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); + lastDocIds = getDocIds(engine, true); + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String delId = Integer.toString(i); + engine.delete(new Engine.Delete("test", delId, newUid(delId), primaryTerm.get())); + } + if (rarely()) { + engine.flush(); + } + } + Engine.Searcher external = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.EXTERNAL); + Engine.Searcher internal = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + assertSame(external.reader(), internal.reader()); + IOUtils.close(external, internal); + // the locked down engine should still point to the previous commit + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + 
assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { + assertTrue(getResult.exists()); + } + + } + // Close and reopen the main engine + InternalEngineTests.trimUnsafeCommits(config); + try (InternalEngine recoveringEngine = new InternalEngine(config)) { + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); + // the locked down engine should still point to the previous commit + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + } + } finally { + IOUtils.close(readOnlyEngine); + } + } + + public void testFlushes() throws IOException { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + int numDocs = scaledRandomIntBetween(10, 1000); + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < numDocs; i++) { + if (rarely()) { + continue; // gap in sequence number + } + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false)); + if (rarely()) { + engine.flush(); + } + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + } + engine.syncTranslog(); + engine.flushAndClose(); + readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null , null, true, Function.identity()); + Engine.CommitId flush = readOnlyEngine.flush(randomBoolean(), randomBoolean()); + assertEquals(flush, readOnlyEngine.flush(randomBoolean(), randomBoolean())); + } finally { + IOUtils.close(readOnlyEngine); + } + } + } + + public void testReadOnly() throws IOException { + IOUtils.close(engine, store); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + store.createEmpty(); + try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) { + Class expectedException = LuceneTestCase.TEST_ASSERTS_ENABLED ? 
AssertionError.class : + UnsupportedOperationException.class; + expectThrows(expectedException, () -> readOnlyEngine.index(null)); + expectThrows(expectedException, () -> readOnlyEngine.delete(null)); + expectThrows(expectedException, () -> readOnlyEngine.noOp(null)); + expectThrows(UnsupportedOperationException.class, () -> readOnlyEngine.syncFlush(null, null)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index cc224019100..624205a1a3c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query.functionscore; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -272,6 +273,8 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase builder.scoreMode(null)); expectThrows(IllegalArgumentException.class, () -> builder.boostMode(null)); + expectThrows(IllegalArgumentException.class, + () -> new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WeightBuilder().setWeight(-1 * randomFloat()))); } public void testParseFunctionsArray() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index e471874f6d6..f2cdfbf8fc5 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -519,18 +519,14 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase shards.promoteReplicaToPrimary(replica2).get(); logger.info("--> Recover replica3 from replica2"); recoverReplica(replica3, replica2, true); - try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) { + try (Translog.Snapshot snapshot = replica3.getHistoryOperations("test", 0)) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); final List expectedOps = new ArrayList<>(initOperations); expectedOps.add(op2); assertThat(snapshot, containsOperationsInAnyOrder(expectedOps)); assertThat("Peer-recovery should not send overridden operations", snapshot.skippedOperations(), equalTo(0)); } - // TODO: We should assert the content of shards in the ReplicationGroup. 
- // Without rollback replicas(current implementation), we don't have the same content across shards: - // - replica1 has {doc1} - // - replica2 has {doc1, doc2} - // - replica3 can have either {doc2} only if operation-based recovery or {doc1, doc2} if file-based recovery + shards.assertAllEqual(initDocs + 1); } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 28122665e9b..a73d7385d9d 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -55,10 +55,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -306,14 +304,6 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedOpsOnPrimary)); } - - // roll back the extra ops in the replica - shards.removeReplica(replica); - replica.close("resync", false); - replica.store().close(); - newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); - shards.recoverReplica(newReplica); - shards.assertAllEqual(totalDocs); // Make sure that flushing on a recovering shard is ok. shards.flush(); shards.assertAllEqual(totalDocs); @@ -406,31 +396,14 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC indexOnReplica(bulkShardRequest, shards, justReplica); } - logger.info("--> seqNo primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats()); - - logger.info("--> resyncing replicas"); + logger.info("--> resyncing replicas seqno_stats primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats()); PrimaryReplicaSyncer.ResyncTask task = shards.promoteReplicaToPrimary(newPrimary).get(); if (syncedGlobalCheckPoint) { assertEquals(extraDocs, task.getResyncedOperations()); } else { assertThat(task.getResyncedOperations(), greaterThanOrEqualTo(extraDocs)); } - List replicas = shards.getReplicas(); - - // check all docs on primary are available on replica - Set primaryIds = getShardDocUIDs(newPrimary); - assertThat(primaryIds.size(), equalTo(initialDocs + extraDocs)); - for (IndexShard replica : replicas) { - Set replicaIds = getShardDocUIDs(replica); - Set temp = new HashSet<>(primaryIds); - temp.removeAll(replicaIds); - assertThat(replica.routingEntry() + " is missing docs", temp, empty()); - temp = new HashSet<>(replicaIds); - temp.removeAll(primaryIds); - // yeah, replica has more docs as there is no Lucene roll back on it - assertThat(replica.routingEntry() + " has to have extra docs", temp, - extraDocsToBeTrimmed > 0 ? 
not(empty()) : empty()); - } + shards.assertAllEqual(initialDocs + extraDocs); // check translog on replica is trimmed int translogOperations = 0; diff --git a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java index 43b16c6ecc7..e5e2453682f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java @@ -21,8 +21,12 @@ package org.elasticsearch.index.shard; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; +import org.junit.After; import org.mockito.ArgumentCaptor; import java.io.IOException; @@ -35,14 +39,20 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; @@ -50,10 +60,18 @@ import static org.mockito.Mockito.verify; public class GlobalCheckpointListenersTests extends ESTestCase { - final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); + private final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); + private final ScheduledThreadPoolExecutor scheduler = + new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(Settings.EMPTY, "scheduler")); + + @After + public void shutdownScheduler() { + scheduler.shutdown(); + } public void testGlobalCheckpointUpdated() throws IOException { - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, logger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, logger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); final int numberOfListeners = randomIntBetween(0, 16); final long[] globalCheckpoints = new long[numberOfListeners]; @@ -69,7 +87,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { assert e == null; globalCheckpoints[index] = g; }; - globalCheckpointListeners.add(NO_OPS_PERFORMED, listener); + globalCheckpointListeners.add(NO_OPS_PERFORMED, listener, null); } final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE); globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); @@ -92,7 +110,8 @@ public class GlobalCheckpointListenersTests extends 
ESTestCase { } public void testListenersReadyToBeNotified() throws IOException { - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, logger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, logger); final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED + 1, Long.MAX_VALUE); globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); final int numberOfListeners = randomIntBetween(0, 16); @@ -109,7 +128,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { assert e == null; globalCheckpoints[index] = g; }; - globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint - 1), listener); + globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint - 1), listener, null); // the listener should be notified immediately assertThat(globalCheckpoints[index], equalTo(globalCheckpoint)); } @@ -130,7 +149,8 @@ public class GlobalCheckpointListenersTests extends ESTestCase { public void testFailingListenerReadyToBeNotified() { final Logger mockLogger = mock(Logger.class); - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, mockLogger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, mockLogger); final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED + 1, Long.MAX_VALUE); globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); final int numberOfListeners = randomIntBetween(0, 16); @@ -149,7 +169,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { globalCheckpoints[index] = globalCheckpoint; } }; - globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint - 1), listener); + globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint - 1), listener, null); // the listener should be notified immediately if (failure) { assertThat(globalCheckpoints[i], equalTo(Long.MIN_VALUE)); @@ -172,10 +192,11 @@ public class GlobalCheckpointListenersTests extends ESTestCase { } public void testClose() throws IOException { - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, logger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, logger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); final int numberOfListeners = randomIntBetween(0, 16); - final IndexShardClosedException[] exceptions = new IndexShardClosedException[numberOfListeners]; + final Exception[] exceptions = new Exception[numberOfListeners]; for (int i = 0; i < numberOfListeners; i++) { final int index = i; final AtomicBoolean invoked = new AtomicBoolean(); @@ -188,12 +209,13 @@ public class GlobalCheckpointListenersTests extends ESTestCase { assert e != null; exceptions[index] = e; }; - globalCheckpointListeners.add(NO_OPS_PERFORMED, listener); + globalCheckpointListeners.add(NO_OPS_PERFORMED, listener, null); } globalCheckpointListeners.close(); for (int i = 0; i < numberOfListeners; i++) { assertNotNull(exceptions[i]); - assertThat(exceptions[i].getShardId(), equalTo(shardId)); + assertThat(exceptions[i], instanceOf(IndexShardClosedException.class)); + assertThat(((IndexShardClosedException)exceptions[i]).getShardId(), equalTo(shardId)); } // 
test the listeners are not invoked twice @@ -207,7 +229,8 @@ public class GlobalCheckpointListenersTests extends ESTestCase { } public void testAddAfterClose() throws InterruptedException, IOException { - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, logger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, logger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); globalCheckpointListeners.close(); final AtomicBoolean invoked = new AtomicBoolean(); @@ -221,14 +244,15 @@ public class GlobalCheckpointListenersTests extends ESTestCase { } latch.countDown(); }; - globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE), listener); + globalCheckpointListeners.add(randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE), listener, null); latch.await(); assertTrue(invoked.get()); } public void testFailingListenerOnUpdate() { final Logger mockLogger = mock(Logger.class); - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, mockLogger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, mockLogger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); final int numberOfListeners = randomIntBetween(0, 16); final boolean[] failures = new boolean[numberOfListeners]; @@ -248,7 +272,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { globalCheckpoints[index] = g; } }; - globalCheckpointListeners.add(NO_OPS_PERFORMED, listener); + globalCheckpointListeners.add(NO_OPS_PERFORMED, listener, null); } final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE); globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); @@ -282,11 +306,12 @@ public class GlobalCheckpointListenersTests extends ESTestCase { public void testFailingListenerOnClose() throws IOException { final Logger mockLogger = mock(Logger.class); - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, Runnable::run, mockLogger); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, mockLogger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); final int numberOfListeners = randomIntBetween(0, 16); final boolean[] failures = new boolean[numberOfListeners]; - final IndexShardClosedException[] exceptions = new IndexShardClosedException[numberOfListeners]; + final Exception[] exceptions = new Exception[numberOfListeners]; for (int i = 0; i < numberOfListeners; i++) { final int index = i; final boolean failure = randomBoolean(); @@ -301,7 +326,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { exceptions[index] = e; } }; - globalCheckpointListeners.add(NO_OPS_PERFORMED, listener); + globalCheckpointListeners.add(NO_OPS_PERFORMED, listener, null); } globalCheckpointListeners.close(); for (int i = 0; i < numberOfListeners; i++) { @@ -309,7 +334,8 @@ public class GlobalCheckpointListenersTests extends ESTestCase { assertNull(exceptions[i]); } else { assertNotNull(exceptions[i]); - assertThat(exceptions[i].getShardId(), equalTo(shardId)); + assertThat(exceptions[i], instanceOf(IndexShardClosedException.class)); + assertThat(((IndexShardClosedException)exceptions[i]).getShardId(), equalTo(shardId)); } } int 
failureCount = 0; @@ -334,30 +360,90 @@ public class GlobalCheckpointListenersTests extends ESTestCase { count.incrementAndGet(); command.run(); }; - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); + final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE); + final AtomicInteger notified = new AtomicInteger(); final int numberOfListeners = randomIntBetween(0, 16); for (int i = 0; i < numberOfListeners; i++) { - globalCheckpointListeners.add(NO_OPS_PERFORMED, (g, e) -> {}); + globalCheckpointListeners.add( + NO_OPS_PERFORMED, + (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(globalCheckpoint)); + assertNull(e); + }, + null); } - globalCheckpointListeners.globalCheckpointUpdated(randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE)); + globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); + assertThat(notified.get(), equalTo(numberOfListeners)); assertThat(count.get(), equalTo(numberOfListeners == 0 ? 0 : 1)); } + public void testNotificationOnClosedUsesExecutor() throws IOException { + final AtomicInteger count = new AtomicInteger(); + final Executor executor = command -> { + count.incrementAndGet(); + command.run(); + }; + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); + globalCheckpointListeners.close(); + final AtomicInteger notified = new AtomicInteger(); + final int numberOfListeners = randomIntBetween(0, 16); + for (int i = 0; i < numberOfListeners; i++) { + globalCheckpointListeners.add( + NO_OPS_PERFORMED, + (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertNotNull(e); + assertThat(e, instanceOf(IndexShardClosedException.class)); + assertThat(((IndexShardClosedException) e).getShardId(), equalTo(shardId)); + }, + null); + } + assertThat(notified.get(), equalTo(numberOfListeners)); + assertThat(count.get(), equalTo(numberOfListeners)); + } + + public void testListenersReadyToBeNotifiedUsesExecutor() { + final AtomicInteger count = new AtomicInteger(); + final Executor executor = command -> { + count.incrementAndGet(); + command.run(); + }; + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); + final long globalCheckpoint = randomNonNegativeLong(); + globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); + final AtomicInteger notified = new AtomicInteger(); + final int numberOfListeners = randomIntBetween(0, 16); + for (int i = 0; i < numberOfListeners; i++) { + globalCheckpointListeners.add( + randomLongBetween(0, globalCheckpoint), + (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(globalCheckpoint)); + assertNull(e); + }, null); + } + assertThat(notified.get(), equalTo(numberOfListeners)); + assertThat(count.get(), equalTo(numberOfListeners)); + } + public void testConcurrency() throws BrokenBarrierException, InterruptedException { final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, 8)); - final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); + final GlobalCheckpointListeners globalCheckpointListeners = new 
GlobalCheckpointListeners(shardId, executor, scheduler, logger); final AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED); globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint.get()); // we are going to synchronize the actions of three threads: the updating thread, the listener thread, and the main test thread final CyclicBarrier barrier = new CyclicBarrier(3); - final int numberOfIterations = randomIntBetween(1, 1024); + final int numberOfIterations = randomIntBetween(1, 4096); final AtomicBoolean closed = new AtomicBoolean(); final Thread updatingThread = new Thread(() -> { // synchronize starting with the listener thread and the main test thread awaitQuietly(barrier); for (int i = 0; i < numberOfIterations; i++) { - if (rarely() && closed.get() == false) { + if (i > numberOfIterations / 2 && rarely() && closed.get() == false) { closed.set(true); try { globalCheckpointListeners.close(); @@ -365,7 +451,7 @@ public class GlobalCheckpointListenersTests extends ESTestCase { throw new UncheckedIOException(e); } } - if (closed.get() == false) { + if (rarely() && closed.get() == false) { globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint.incrementAndGet()); } } @@ -387,7 +473,8 @@ public class GlobalCheckpointListenersTests extends ESTestCase { if (invocation.compareAndSet(false, true) == false) { throw new IllegalStateException("listener invoked twice"); } - }); + }, + randomBoolean() ? null : TimeValue.timeValueNanos(randomLongBetween(1, TimeUnit.MICROSECONDS.toNanos(1)))); } // synchronize ending with the updating thread and the main test thread awaitQuietly(barrier); @@ -412,6 +499,107 @@ public class GlobalCheckpointListenersTests extends ESTestCase { listenersThread.join(); } + public void testTimeout() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, mockLogger); + final TimeValue timeout = TimeValue.timeValueMillis(randomIntBetween(1, 50)); + final AtomicBoolean notified = new AtomicBoolean(); + final CountDownLatch latch = new CountDownLatch(1); + globalCheckpointListeners.add( + NO_OPS_PERFORMED, + (g, e) -> { + try { + notified.set(true); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertThat(e, instanceOf(TimeoutException.class)); + assertThat(e, hasToString(containsString(timeout.getStringRep()))); + final ArgumentCaptor message = ArgumentCaptor.forClass(String.class); + final ArgumentCaptor t = ArgumentCaptor.forClass(TimeoutException.class); + verify(mockLogger).trace(message.capture(), t.capture()); + assertThat(message.getValue(), equalTo("global checkpoint listener timed out")); + assertThat(t.getValue(), hasToString(containsString(timeout.getStringRep()))); + } catch (Exception caught) { + fail(e.getMessage()); + } finally { + latch.countDown(); + } + }, + timeout); + latch.await(); + + assertTrue(notified.get()); + } + + public void testTimeoutNotificationUsesExecutor() throws InterruptedException { + final AtomicInteger count = new AtomicInteger(); + final Executor executor = command -> { + count.incrementAndGet(); + command.run(); + }; + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); + final TimeValue timeout = TimeValue.timeValueMillis(randomIntBetween(1, 50)); + final AtomicBoolean notified = new AtomicBoolean(); + final CountDownLatch latch = new CountDownLatch(1); + 
globalCheckpointListeners.add( + NO_OPS_PERFORMED, + (g, e) -> { + try { + notified.set(true); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertThat(e, instanceOf(TimeoutException.class)); + } finally { + latch.countDown(); + } + }, + timeout); + latch.await(); + // ensure the listener notification occurred on the executor + assertTrue(notified.get()); + assertThat(count.get(), equalTo(1)); + } + + public void testFailingListenerAfterTimeout() throws InterruptedException { + final Logger mockLogger = mock(Logger.class); + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, mockLogger); + final CountDownLatch latch = new CountDownLatch(1); + final TimeValue timeout = TimeValue.timeValueMillis(randomIntBetween(1, 50)); + globalCheckpointListeners.add( + NO_OPS_PERFORMED, + (g, e) -> { + try { + throw new RuntimeException("failure"); + } finally { + latch.countDown(); + } + }, + timeout); + latch.await(); + final ArgumentCaptor message = ArgumentCaptor.forClass(String.class); + final ArgumentCaptor t = ArgumentCaptor.forClass(RuntimeException.class); + verify(mockLogger).warn(message.capture(), t.capture()); + assertThat(message.getValue(), equalTo("error notifying global checkpoint listener of timeout")); + assertNotNull(t.getValue()); + assertThat(t.getValue(), instanceOf(RuntimeException.class)); + assertThat(t.getValue().getMessage(), equalTo("failure")); + } + + public void testTimeoutCancelledAfterListenerNotified() { + final GlobalCheckpointListeners globalCheckpointListeners = + new GlobalCheckpointListeners(shardId, Runnable::run, scheduler, logger); + final TimeValue timeout = TimeValue.timeValueNanos(Long.MAX_VALUE); + final GlobalCheckpointListeners.GlobalCheckpointListener globalCheckpointListener = (g, e) -> { + assertThat(g, equalTo(NO_OPS_PERFORMED)); + assertNull(e); + }; + globalCheckpointListeners.add(NO_OPS_PERFORMED, globalCheckpointListener, timeout); + final ScheduledFuture future = globalCheckpointListeners.getTimeoutFuture(globalCheckpointListener); + assertNotNull(future); + globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); + assertTrue(future.isCancelled()); + } + private void awaitQuietly(final CyclicBarrier barrier) { try { barrier.await(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 87edfcfccb1..8fe1daefe6d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -89,6 +89,7 @@ import java.util.Locale; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -113,6 +114,8 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; public class IndexShardIT extends ESSingleNodeTestCase { @@ -746,10 +749,11 @@ public class IndexShardIT extends ESSingleNodeTestCase { shard.addGlobalCheckpointListener( i - 1, (g, e) -> { - assert g >= 
NO_OPS_PERFORMED; - assert e == null; + assertThat(g, greaterThanOrEqualTo(NO_OPS_PERFORMED)); + assertNull(e); globalCheckpoint.set(g); - }); + }, + null); client().prepareIndex("test", "_doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); assertBusy(() -> assertThat(globalCheckpoint.get(), equalTo((long) index))); // adding a listener expecting a lower global checkpoint should fire immediately @@ -757,10 +761,11 @@ public class IndexShardIT extends ESSingleNodeTestCase { shard.addGlobalCheckpointListener( randomLongBetween(NO_OPS_PERFORMED, i - 1), (g, e) -> { - assert g >= NO_OPS_PERFORMED; - assert e == null; + assertThat(g, greaterThanOrEqualTo(NO_OPS_PERFORMED)); + assertNull(e); immediateGlobalCheckpint.set(g); - }); + }, + null); assertBusy(() -> assertThat(immediateGlobalCheckpint.get(), equalTo((long) index))); } final AtomicBoolean invoked = new AtomicBoolean(); @@ -768,12 +773,40 @@ public class IndexShardIT extends ESSingleNodeTestCase { numberOfUpdates - 1, (g, e) -> { invoked.set(true); - assert g == UNASSIGNED_SEQ_NO; - assert e != null; - assertThat(e.getShardId(), equalTo(shard.shardId())); - }); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertThat(e, instanceOf(IndexShardClosedException.class)); + assertThat(((IndexShardClosedException)e).getShardId(), equalTo(shard.shardId())); + }, + null); shard.close("closed", randomBoolean()); assertBusy(() -> assertTrue(invoked.get())); } + public void testGlobalCheckpointListenerTimeout() throws InterruptedException { + createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()); + ensureGreen(); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService test = indicesService.indexService(resolveIndex("test")); + final IndexShard shard = test.getShardOrNull(0); + final AtomicBoolean notified = new AtomicBoolean(); + final CountDownLatch latch = new CountDownLatch(1); + final TimeValue timeout = TimeValue.timeValueMillis(randomIntBetween(1, 50)); + shard.addGlobalCheckpointListener( + NO_OPS_PERFORMED, + (g, e) -> { + try { + notified.set(true); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertNotNull(e); + assertThat(e, instanceOf(TimeoutException.class)); + assertThat(e.getMessage(), equalTo(timeout.getStringRep())); + } finally { + latch.countDown(); + } + }, + timeout); + latch.await(); + assertTrue(notified.get()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 7f37846d3f0..9a5df39a970 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -106,6 +106,7 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.translog.TestTranslog; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.translog.TranslogTests; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -181,6 +182,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; /** * Simple unit-test IndexShard 
related operations. @@ -945,28 +947,25 @@ public class IndexShardTests extends IndexShardTestCase { resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); - - closeShards(indexShard); + closeShard(indexShard, false); } - public void testThrowBackLocalCheckpointOnReplica() throws IOException, InterruptedException { + public void testRollbackReplicaEngineOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); - final long globalCheckpointOnReplica = - randomIntBetween( - Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), - Math.toIntExact(indexShard.getLocalCheckpoint())); + final long globalCheckpointOnReplica = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); indexShard.updateGlobalCheckpointOnReplica(globalCheckpointOnReplica, "test"); - - final int globalCheckpoint = - randomIntBetween( - Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), - Math.toIntExact(indexShard.getLocalCheckpoint())); + final long globalCheckpoint = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); + Set docsBelowGlobalCheckpoint = getShardDocUIDs(indexShard).stream() + .filter(id -> Long.parseLong(id) <= Math.max(globalCheckpointOnReplica, globalCheckpoint)).collect(Collectors.toSet()); final CountDownLatch latch = new CountDownLatch(1); + final boolean shouldRollback = Math.max(globalCheckpoint, globalCheckpointOnReplica) < indexShard.seqNoStats().getMaxSeqNo() + && indexShard.seqNoStats().getMaxSeqNo() != SequenceNumbers.NO_OPS_PERFORMED; + final Engine beforeRollbackEngine = indexShard.getEngine(); indexShard.acquireReplicaOperationPermit( indexShard.pendingPrimaryTerm + 1, globalCheckpoint, @@ -985,18 +984,21 @@ public class IndexShardTests extends IndexShardTestCase { ThreadPool.Names.SAME, ""); latch.await(); - if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO - && globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO && globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } else { assertThat(indexShard.getLocalCheckpoint(), equalTo(Math.max(globalCheckpoint, globalCheckpointOnReplica))); } - + assertThat(getShardDocUIDs(indexShard), equalTo(docsBelowGlobalCheckpoint)); + if (shouldRollback) { + assertThat(indexShard.getEngine(), not(sameInstance(beforeRollbackEngine))); + } else { + assertThat(indexShard.getEngine(), sameInstance(beforeRollbackEngine)); + } // ensure that after the local checkpoint throw back and indexing again, the local checkpoint advances final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(indexShard.getLocalCheckpoint())); assertThat(indexShard.getLocalCheckpoint(), equalTo((long) result.localCheckpoint)); - - closeShards(indexShard); + closeShard(indexShard, false); } public void testConcurrentTermIncreaseOnReplicaShard() throws BrokenBarrierException, InterruptedException, IOException { @@ -1880,13 +1882,17 @@ public class IndexShardTests extends IndexShardTestCase { 
SourceToParse.source(indexName, "_doc", "doc-1", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1")); - // Simulate resync (without rollback): Noop #1, index #2 - acquireReplicaOperationPermitBlockingly(shard, shard.pendingPrimaryTerm + 1); + // Here we try to increase term (i.e. a new primary is promoted) without rolling back a replica so we can keep stale operations + // in the index commit; then verify that a recovery from store (started with the safe commit) will remove all stale operations. + shard.pendingPrimaryTerm++; + shard.operationPrimaryTerm++; + shard.getEngine().rollTranslogGeneration(); shard.markSeqNoAsNoop(1, "test"); shard.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1", "doc-2")); + closeShard(shard, false); // Recovering from store should discard doc #1 final ShardRouting replicaRouting = shard.routingEntry(); IndexShard newShard = reinitShard(shard, @@ -2249,10 +2255,11 @@ public class IndexShardTests extends IndexShardTestCase { null)); primary.recoverFromStore(); + primary.recoveryState().getTranslog().totalOperations(snapshot.totalOperations()); + primary.recoveryState().getTranslog().totalOperationsOnStart(snapshot.totalOperations()); primary.state = IndexShardState.RECOVERING; // translog recovery on the next line would otherwise fail as we are in POST_RECOVERY - primary.runTranslogRecovery(primary.getEngine(), snapshot); - assertThat(primary.recoveryState().getTranslog().totalOperationsOnStart(), equalTo(numTotalEntries)); - assertThat(primary.recoveryState().getTranslog().totalOperations(), equalTo(numTotalEntries)); + primary.runTranslogRecovery(primary.getEngine(), snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, + primary.recoveryState().getTranslog()::incrementRecoveredOperations); assertThat(primary.recoveryState().getTranslog().recoveredOperations(), equalTo(numTotalEntries - numCorruptEntries)); closeShards(primary); @@ -2865,6 +2872,9 @@ public class IndexShardTests extends IndexShardTestCase { } else { gap = true; } + if (rarely()) { + indexShard.flush(new FlushRequest()); + } } assert localCheckpoint == indexShard.getLocalCheckpoint(); assert !gap || (localCheckpoint != max); @@ -2959,7 +2969,8 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { } @Override @@ -3402,4 +3413,19 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(shard); } + + public void testResetEngine() throws Exception { + IndexShard shard = newStartedShard(false); + indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); + final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); + Set docBelowGlobalCheckpoint = getShardDocUIDs(shard).stream() + .filter(id -> Long.parseLong(id) <= globalCheckpoint).collect(Collectors.toSet()); + TranslogStats 
translogStats = shard.translogStats(); + shard.resetEngineToGlobalCheckpoint(); + assertThat(getShardDocUIDs(shard), equalTo(docBelowGlobalCheckpoint)); + assertThat(shard.seqNoStats().getMaxSeqNo(), equalTo(globalCheckpoint)); + assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + closeShard(shard, false); + } } diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index cb93d803bb7..8d0f1845be6 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -103,6 +103,7 @@ public class RelocationIT extends ESIntegTestCase { protected void beforeIndexDeletion() throws Exception { super.beforeIndexDeletion(); assertSeqNos(); + assertSameDocIdsOnShards(); } public void testSimpleRelocationNoIndexing() { diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java index 0294f9f67f8..2e28d16c71d 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java @@ -69,7 +69,10 @@ public class SearchCancellationIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { boolean lowLevelCancellation = randomBoolean(); logger.info("Using lowLevelCancellation: {}", lowLevelCancellation); - return Settings.builder().put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), lowLevelCancellation).build(); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), lowLevelCancellation) + .build(); } private void indexTestData() { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 44c49ace5de..3c91cda5f86 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -275,7 +275,6 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { assertSuggestions("foo", typeFilterSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); } - @AwaitsFix(bugUrl = "multiple context boosting is broken, as a suggestion, contexts pair is treated as (num(context) entries)") public void testMultiContextBoosting() throws Exception { LinkedHashMap<String, ContextMapping<?>> map = new LinkedHashMap<>(); map.put("cat", ContextBuilder.category("cat").field("cat").build(); @@ -328,7 +327,8 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { CategoryQueryContext.builder().setCategory("cat1").build()) ); multiContextBoostSuggest.contexts(contextMap); - assertSuggestions("foo", multiContextBoostSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); + // the score of each suggestion is the maximum score among the matching contexts + assertSuggestions("foo", multiContextBoostSuggest, "suggestion9", "suggestion8", "suggestion5", "suggestion6", "suggestion4"); } public void testSeveralContexts() throws Exception {
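The re-enabled testMultiContextBoosting relies on the rule spelled out in the new comment: a suggestion matching several query contexts is scored with the maximum matching boost, not their sum or count. A hedged usage sketch of that behaviour (an assumed fragment of a test method; the field and category names are illustrative, not the test's literal code):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
import org.elasticsearch.search.suggest.completion.context.CategoryQueryContext;

// ... inside a test method: a suggestion tagged with both "cat0" (boost 3) and
// "cat1" (default boost 1) should be scored with 3, the maximum of the two.
Map<String, List<? extends ToXContent>> contexts = new HashMap<>();
contexts.put("cat", Arrays.asList(
        CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(),
        CategoryQueryContext.builder().setCategory("cat1").build()));
CompletionSuggestionBuilder suggest =
        SuggestBuilders.completionSuggestion("suggest_field").prefix("sugg").contexts(contexts);

diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java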
b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java new file mode 100644 index 00000000000..b24a010c1a0 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + + +import java.util.Objects; + +/** A tuple of document id, sequence number and primary term of a document */ +public final class DocIdSeqNoAndTerm { + private final String id; + private final long seqNo; + private final long primaryTerm; + + public DocIdSeqNoAndTerm(String id, long seqNo, long primaryTerm) { + this.id = id; + this.seqNo = seqNo; + this.primaryTerm = primaryTerm; + } + + public String getId() { + return id; + } + + public long getSeqNo() { + return seqNo; + } + + public long getPrimaryTerm() { + return primaryTerm; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DocIdSeqNoAndTerm that = (DocIdSeqNoAndTerm) o; + return Objects.equals(id, that.id) && seqNo == that.seqNo && primaryTerm == that.primaryTerm; + } + + @Override + public int hashCode() { + return Objects.hash(id, seqNo, primaryTerm); + } + + @Override + public String toString() { + return "DocIdSeqNoAndTerm{id='" + id + "' seqNo=" + seqNo + " primaryTerm=" + primaryTerm + "}"; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 283a7b13753..86f7bd903cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -95,11 +96,10 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -775,26 +775,41 @@ public abstract class EngineTestCase extends ESTestCase { } /** - * Gets all docId from the given engine. + * Gets a collection of tuples of docId, sequence number, and primary term of all live documents in the provided engine. */ - public static Set<String> getDocIds(Engine engine, boolean refresh) throws IOException { + public static List<DocIdSeqNoAndTerm> getDocIds(Engine engine, boolean refresh) throws IOException { if (refresh) { engine.refresh("test_get_doc_ids"); } try (Engine.Searcher searcher = engine.acquireSearcher("test_get_doc_ids")) { - Set<String> ids = new HashSet<>(); + List<DocIdSeqNoAndTerm> docs = new ArrayList<>(); for (LeafReaderContext leafContext : searcher.reader().leaves()) { LeafReader reader = leafContext.reader(); + NumericDocValues seqNoDocValues = reader.getNumericDocValues(SeqNoFieldMapper.NAME); + NumericDocValues primaryTermDocValues = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); Bits liveDocs = reader.getLiveDocs(); for (int i = 0; i < reader.maxDoc(); i++) { if (liveDocs == null || liveDocs.get(i)) { Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); - ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); + String id = Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length)); + final long primaryTerm; + if (primaryTermDocValues.advanceExact(i)) { + primaryTerm = primaryTermDocValues.longValue(); + } else { + primaryTerm = 0; // non-root documents of a nested document. + } + if (seqNoDocValues.advanceExact(i) == false) { + throw new AssertionError("seqNoDocValues not found for doc[" + i + "] id[" + id + "]"); + } + final long seqNo = seqNoDocValues.longValue(); + docs.add(new DocIdSeqNoAndTerm(id, seqNo, primaryTerm)); } } } - return ids; + docs.sort(Comparator.comparing(DocIdSeqNoAndTerm::getId) + .thenComparingLong(DocIdSeqNoAndTerm::getSeqNo).thenComparingLong(DocIdSeqNoAndTerm::getPrimaryTerm)); + return docs; } }
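Because getDocIds now returns documents in a canonical order (by id, then seqNo, then primaryTerm), two engines holding the same live documents compare equal with plain list equality. A minimal usage sketch, assuming two already-populated Engine instances with the illustrative names primaryEngine and replicaEngine:

// inside a test method; the engine variable names are assumptions
List<DocIdSeqNoAndTerm> primaryDocs = EngineTestCase.getDocIds(primaryEngine, true);
List<DocIdSeqNoAndTerm> replicaDocs = EngineTestCase.getDocIds(replicaEngine, true);
assertEquals(primaryDocs, replicaDocs); // same docs, same seqNos, same primary terms

@@ -818,7 +833,8 @@ * Asserts the provided engine has a consistent document history between translog and Lucene index.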
*/ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { - if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false) { + if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false + || (engine instanceof InternalEngine) == false) { return; } final long maxSeqNo = ((InternalEngine) engine).getLocalCheckpointTracker().getMaxSeqNo(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 28767cb34d7..42eab104d6a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -89,6 +89,17 @@ public abstract class FieldTypeTestCase extends ESTestCase { other.setIndexAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, + // check that we can update if the analyzer is unchanged + new Modifier("analyzer", true) { + @Override + public void modify(MappedFieldType ft) { + ft.setIndexAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setIndexAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer())); + } + }, new Modifier("search_analyzer", true) { @Override public void modify(MappedFieldType ft) { @@ -137,6 +148,17 @@ public abstract class FieldTypeTestCase extends ESTestCase { other.setSimilarity(new SimilarityProvider("bar", new BM25Similarity())); } }, + // check that we can update if the similarity is unchanged + new Modifier("similarity", true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); + } + }, new Modifier("eager_global_ordinals", true) { @Override public void modify(MappedFieldType ft) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 8717d7ba146..5f0909db0d3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -443,6 +443,10 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase return primary; } + public synchronized void reinitPrimaryShard() throws IOException { + primary = reinitShard(primary); + } + public void syncGlobalCheckpoint() { PlainActionFuture listener = new PlainActionFuture<>(); try { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index ca2156144b3..78ce5bc500c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.VersionType; import 
org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.query.DisabledQueryCache; +import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; @@ -82,12 +83,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.hamcrest.Matchers.contains; @@ -123,7 +126,7 @@ public abstract class IndexShardTestCase extends ESTestCase { }; protected ThreadPool threadPool; - private long primaryTerm; + protected long primaryTerm; @Override public void setUp() throws Exception { @@ -451,15 +454,20 @@ public abstract class IndexShardTestCase extends ESTestCase { closeShards(Arrays.asList(shards)); } + protected void closeShard(IndexShard shard, boolean assertConsistencyBetweenTranslogAndLucene) throws IOException { + try { + if (assertConsistencyBetweenTranslogAndLucene) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + } finally { + IOUtils.close(() -> shard.close("test", false), shard.store()); + } + } + protected void closeShards(Iterable shards) throws IOException { for (IndexShard shard : shards) { if (shard != null) { - try { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - shard.close("test", false); - } finally { - IOUtils.close(shard.store()); - } + closeShard(shard, true); } } } @@ -635,7 +643,11 @@ public abstract class IndexShardTestCase extends ESTestCase { return result; } - protected Set getShardDocUIDs(final IndexShard shard) throws IOException { + public static Set getShardDocUIDs(final IndexShard shard) throws IOException { + return getDocIdAndSeqNos(shard).stream().map(DocIdSeqNoAndTerm::getId).collect(Collectors.toSet()); + } + + public static List getDocIdAndSeqNos(final IndexShard shard) throws IOException { return EngineTestCase.getDocIds(shard.getEngine(), true); } @@ -741,7 +753,8 @@ public abstract class IndexShardTestCase extends ESTestCase { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); - repository.snapshotShard(shard, snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), snapshotStatus); + repository.snapshotShard(shard, shard.store(), snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), + snapshotStatus); } final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 68a862c109d..52f234c9690 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -125,6 +125,7 @@ import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import 
org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -132,6 +133,7 @@ import org.elasticsearch.index.mapper.MockFieldFilterPlugin; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -204,6 +206,8 @@ import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; +import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; @@ -1806,7 +1810,9 @@ public abstract class ESIntegTestCase extends ESTestCase { // wait short time for other active shards before actually deleting, default 30s not needed in tests .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)) // randomly enable low-level search cancellation to make sure it does not alter results - .put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean()); + .put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean()) + .putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes + .putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "file"); if (rarely()) { // Sometimes adjust the minimum search thread pool size, causing // QueueResizingEsThreadPoolExecutor to be used instead of a regular @@ -1919,7 +1925,7 @@ public abstract class ESIntegTestCase extends ESTestCase { networkSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); } - NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { + return new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -1953,7 +1959,6 @@ public abstract class ESIntegTestCase extends ESTestCase { return Collections.unmodifiableCollection(plugins); } }; - return nodeConfigurationSource; } /** @@ -2027,7 +2032,7 @@ public abstract class ESIntegTestCase extends ESTestCase { public static final class TestSeedPlugin extends Plugin { @Override public List> getSettings() { - return Arrays.asList(INDEX_TEST_SEED_SETTING); + return Collections.singletonList(INDEX_TEST_SEED_SETTING); } } @@ -2380,6 +2385,49 @@ public abstract class ESIntegTestCase extends ESTestCase { }); } + /** + * Asserts that all shards with the same shardId contain the same set of document Ids.
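+ * The copies are compared as DocIdSeqNoAndTerm lists, so a replica must match its primary's doc ids, seq_nos and primary terms exactly.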
+ */ + public void assertSameDocIdsOnShards() throws Exception { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor indexRoutingTable : state.routingTable().indicesRouting()) { + for (IntObjectCursor indexShardRoutingTable : indexRoutingTable.value.shards()) { + ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard(); + if (primaryShardRouting == null || primaryShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode primaryNode = state.nodes().get(primaryShardRouting.currentNodeId()); + IndexShard primaryShard = internalCluster().getInstance(IndicesService.class, primaryNode.getName()) + .indexServiceSafe(primaryShardRouting.index()).getShard(primaryShardRouting.id()); + final List docsOnPrimary; + try { + docsOnPrimary = IndexShardTestCase.getDocIdAndSeqNos(primaryShard); + } catch (AlreadyClosedException ex) { + continue; + } + for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) { + if (replicaShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode replicaNode = state.nodes().get(replicaShardRouting.currentNodeId()); + IndexShard replicaShard = internalCluster().getInstance(IndicesService.class, replicaNode.getName()) + .indexServiceSafe(replicaShardRouting.index()).getShard(replicaShardRouting.id()); + final List docsOnReplica; + try { + docsOnReplica = IndexShardTestCase.getDocIdAndSeqNos(replicaShard); + } catch (AlreadyClosedException ex) { + continue; + } + assertThat("out of sync shards: primary=[" + primaryShardRouting + "] num_docs_on_primary=[" + docsOnPrimary.size() + + "] vs replica=[" + replicaShardRouting + "] num_docs_on_replica=[" + docsOnReplica.size() + "]", + docsOnReplica, equalTo(docsOnPrimary)); + } + } + } + }); + } + public static boolean inFipsJvm() { return Security.getProviders()[0].getName().toLowerCase(Locale.ROOT).contains("fips"); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index d73520f91b3..bcaa4e8303f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -62,6 +62,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -197,6 +198,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { // turning on the real memory circuit breaker leads to spurious test failures. As we have no full control over heap usage, we // turn it off for these tests.
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) + .putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes .put(nodeSettings()) // allow test cases to provide their own settings or override these .build(); Collection> plugins = getPlugins(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 3c46acd0fbe..354cb807bb2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -47,6 +47,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; @@ -75,6 +76,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; @@ -102,6 +104,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; import java.net.InetSocketAddress; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; @@ -113,6 +116,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.TreeMap; @@ -127,10 +131,12 @@ import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.emptyList; import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; import static org.apache.lucene.util.LuceneTestCase.rarely; import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; +import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; import static org.elasticsearch.test.ESTestCase.getTestTransportType; @@ -485,11 +491,13 @@ public final class InternalTestCluster extends TestCluster { private synchronized NodeAndClient getOrBuildRandomNode() { ensureOpen(); - NodeAndClient randomNodeAndClient = getRandomNodeAndClient(); + final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(); if (randomNodeAndClient != null) { return randomNodeAndClient; } - NodeAndClient buildNode = buildNode(1); + final int ord = nextNodeId.getAndIncrement(); + final Runnable onTransportServiceStarted = () -> {}; // do not create unicast host file for this one node. 
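+ // a single node can elect itself master without any seed hosts, so there is nothing useful to publish yet; + // rebuildUnicastHostFiles regenerates the hosts file as soon as further nodes start and need to discover this one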
+ final NodeAndClient buildNode = buildNode(ord, random.nextLong(), null, false, 1, onTransportServiceStarted); buildNode.startNode(); publishNode(buildNode); return buildNode; @@ -561,20 +569,11 @@ public final class InternalTestCluster extends TestCluster { * * @param settings the settings to use * @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed + * @param onTransportServiceStarted callback to run when transport service is started */ - private NodeAndClient buildNode(Settings settings, int defaultMinMasterNodes) { + private NodeAndClient buildNode(Settings settings, int defaultMinMasterNodes, Runnable onTransportServiceStarted) { int ord = nextNodeId.getAndIncrement(); - return buildNode(ord, random.nextLong(), settings, false, defaultMinMasterNodes); - } - - /** - * builds a new node with default settings - * - * @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed - */ - private NodeAndClient buildNode(int defaultMinMasterNodes) { - int ord = nextNodeId.getAndIncrement(); - return buildNode(ord, random.nextLong(), null, false, defaultMinMasterNodes); + return buildNode(ord, random.nextLong(), settings, false, defaultMinMasterNodes, onTransportServiceStarted); } /** @@ -586,15 +585,17 @@ public final class InternalTestCluster extends TestCluster { * @param reuseExisting if a node with the same name is already part of {@link #nodes}, no new node will be built and * the method will return the existing one * @param defaultMinMasterNodes min_master_nodes value to use if min_master_nodes is auto managed + * @param onTransportServiceStarted callback to run when transport service is started */ private NodeAndClient buildNode(int nodeId, long seed, Settings settings, - boolean reuseExisting, int defaultMinMasterNodes) { + boolean reuseExisting, int defaultMinMasterNodes, Runnable onTransportServiceStarted) { assert Thread.holdsLock(this); ensureOpen(); settings = getSettings(nodeId, seed, settings); Collection> plugins = getPlugins(); String name = buildNodeName(nodeId, settings); if (reuseExisting && nodes.containsKey(name)) { + onTransportServiceStarted.run(); // reusing an existing node implies its transport service already started return nodes.get(name); } else { assert reuseExisting == true || nodes.containsKey(name) == false : @@ -630,6 +631,12 @@ public final class InternalTestCluster extends TestCluster { plugins, nodeConfigurationSource.nodeConfigPath(nodeId), forbidPrivateIndexSettings); + node.injector().getInstance(TransportService.class).addLifecycleListener(new LifecycleListener() { + @Override + public void afterStart() { + onTransportServiceStarted.run(); + } + }); try { IOUtils.close(secureSettings); } catch (IOException e) { @@ -906,14 +913,15 @@ public final class InternalTestCluster extends TestCluster { if (!node.isClosed()) { closeNode(); } - recreateNodeOnRestart(callback, clearDataIfNeeded, minMasterNodes); + recreateNodeOnRestart(callback, clearDataIfNeeded, minMasterNodes, () -> rebuildUnicastHostFiles(emptyList())); startNode(); } /** * rebuilds a new node object using the current node settings and starts it */ - void recreateNodeOnRestart(RestartCallback callback, boolean clearDataIfNeeded, int minMasterNodes) throws Exception { + void recreateNodeOnRestart(RestartCallback callback, boolean clearDataIfNeeded, int minMasterNodes, + Runnable onTransportServiceStarted) throws Exception { assert callback != null; Settings callbackSettings = callback.onNodeStopped(name); 
Settings.Builder newSettings = Settings.builder(); @@ -927,7 +935,7 @@ public final class InternalTestCluster extends TestCluster { if (clearDataIfNeeded) { clearDataIfNeeded(callback); } - createNewNode(newSettings.build()); + createNewNode(newSettings.build(), onTransportServiceStarted); // make sure cached client points to new node resetClient(); } @@ -943,7 +951,7 @@ public final class InternalTestCluster extends TestCluster { } } - private void createNewNode(final Settings newSettings) { + private void createNewNode(final Settings newSettings, final Runnable onTransportServiceStarted) { final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id Settings finalSettings = Settings.builder().put(node.originalSettings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { @@ -952,6 +960,12 @@ public final class InternalTestCluster extends TestCluster { } Collection> plugins = node.getClasspathPlugins(); node = new MockNode(finalSettings, plugins); + node.injector().getInstance(TransportService.class).addLifecycleListener(new LifecycleListener() { + @Override + public void afterStart() { + onTransportServiceStarted.run(); + } + }); markNodeDataDirsAsNotEligableForWipe(node); } @@ -1054,11 +1068,13 @@ public final class InternalTestCluster extends TestCluster { final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes; final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1; final List toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go due to min master nodes + final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish); for (int i = 0; i < numSharedDedicatedMasterNodes; i++) { final Settings.Builder settings = Settings.builder(); settings.put(Node.NODE_MASTER_SETTING.getKey(), true); settings.put(Node.NODE_DATA_SETTING.getKey(), false); - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes); + NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes, + onTransportServiceStarted); toStartAndPublish.add(nodeAndClient); } for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) { @@ -1068,14 +1084,16 @@ public final class InternalTestCluster extends TestCluster { settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build(); settings.put(Node.NODE_DATA_SETTING.getKey(), true).build(); } - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes); + NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes, + onTransportServiceStarted); toStartAndPublish.add(nodeAndClient); } for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { final Builder settings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes); + NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, 
defaultMinMasterNodes, + onTransportServiceStarted); toStartAndPublish.add(nodeAndClient); } @@ -1199,7 +1217,9 @@ public final class InternalTestCluster extends TestCluster { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { try { - IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs(); + if (IndexShardTestCase.getEngine(indexShard) instanceof InternalEngine) { + IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs(); + } } catch (AlreadyClosedException ok) { // all good } @@ -1426,6 +1446,7 @@ public final class InternalTestCluster extends TestCluster { updateMinMasterNodes(currentMasters + newMasters); } List> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList()); + try { for (Future future : futures) { future.get(); @@ -1446,6 +1467,30 @@ public final class InternalTestCluster extends TestCluster { } } + private final Object discoveryFileMutex = new Object(); + + private void rebuildUnicastHostFiles(Collection newNodes) { + // cannot be a synchronized method since it's called on other threads from within synchronized startAndPublishNodesAndClients() + synchronized (discoveryFileMutex) { + try { + List discoveryFileContents = Stream.concat(nodes.values().stream(), newNodes.stream()) + .map(nac -> nac.node.injector().getInstance(TransportService.class)).filter(Objects::nonNull) + .map(TransportService::getLocalNode).filter(Objects::nonNull).filter(DiscoveryNode::isMasterNode) + .map(n -> n.getAddress().toString()) + .distinct().collect(Collectors.toList()); + Set configPaths = Stream.concat(nodes.values().stream(), newNodes.stream()) + .map(nac -> nac.node.getEnvironment().configFile()).collect(Collectors.toSet()); + logger.debug("configuring discovery with {} at {}", discoveryFileContents, configPaths); + for (final Path configPath : configPaths) { + Files.createDirectories(configPath); + Files.write(configPath.resolve(UNICAST_HOSTS_FILE), discoveryFileContents); + } + } catch (IOException e) { + throw new AssertionError("failed to configure file-based discovery", e); + } + } + } + private synchronized void stopNodesAndClient(NodeAndClient nodeAndClient) throws IOException { stopNodesAndClients(Collections.singleton(nodeAndClient)); } @@ -1604,7 +1649,7 @@ public final class InternalTestCluster extends TestCluster { for (List sameRoleNodes : nodesByRoles.values()) { Collections.shuffle(sameRoleNodes, random); } - List startUpOrder = new ArrayList<>(); + final List startUpOrder = new ArrayList<>(); for (Set roles : rolesOrderedByOriginalStartupOrder) { if (roles == null) { // if some nodes were stopped, we want have a role for that ordinal @@ -1615,11 +1660,11 @@ public final class InternalTestCluster extends TestCluster { } assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == 0; - // do two rounds to minimize pinging (mock zen pings pings with no delay and can create a lot of logs) for (NodeAndClient nodeAndClient : startUpOrder) { logger.info("resetting node [{}] ", nodeAndClient.name); // we already cleared data folders, before starting nodes up - nodeAndClient.recreateNodeOnRestart(callback, false, autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1); + nodeAndClient.recreateNodeOnRestart(callback, false, autoManageMinMasterNodes ? 
getMinMasterNodes(getMasterNodesCount()) : -1, + () -> rebuildUnicastHostFiles(startUpOrder)); } startAndPublishNodesAndClients(startUpOrder); @@ -1738,9 +1783,9 @@ public final class InternalTestCluster extends TestCluster { } else { defaultMinMasterNodes = -1; } - List nodes = new ArrayList<>(); - for (Settings nodeSettings: settings) { - nodes.add(buildNode(nodeSettings, defaultMinMasterNodes)); + final List nodes = new ArrayList<>(); + for (Settings nodeSettings : settings) { + nodes.add(buildNode(nodeSettings, defaultMinMasterNodes, () -> rebuildUnicastHostFiles(nodes))); } startAndPublishNodesAndClients(nodes); if (autoManageMinMasterNodes) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java deleted file mode 100644 index dc9304637cd..00000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test.discovery; - -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; - -import java.io.Closeable; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -/** - * A {@link UnicastHostsProvider} implementation which returns results based on a static in-memory map. 
This allows running - * with nodes that only determine their transport address at runtime, which is the default behavior of - * {@link org.elasticsearch.test.InternalTestCluster} - */ -public final class MockUncasedHostProvider implements UnicastHostsProvider, Closeable { - - static final Map> activeNodesPerCluster = new HashMap<>(); - - - private final Supplier localNodeSupplier; - private final ClusterName clusterName; - - public MockUncasedHostProvider(Supplier localNodeSupplier, ClusterName clusterName) { - this.localNodeSupplier = localNodeSupplier; - this.clusterName = clusterName; - synchronized (activeNodesPerCluster) { - getActiveNodesForCurrentCluster().add(this); - } - } - - @Override - public List buildDynamicHosts(HostsResolver hostsResolver) { - final DiscoveryNode localNode = getNode(); - assert localNode != null; - synchronized (activeNodesPerCluster) { - Set activeNodes = getActiveNodesForCurrentCluster(); - return activeNodes.stream() - .map(MockUncasedHostProvider::getNode) - .filter(Objects::nonNull) - .filter(n -> localNode.equals(n) == false) - .map(DiscoveryNode::getAddress) - .collect(Collectors.toList()); - } - } - - @Nullable - private DiscoveryNode getNode() { - return localNodeSupplier.get(); - } - - private Set getActiveNodesForCurrentCluster() { - assert Thread.holdsLock(activeNodesPerCluster); - return activeNodesPerCluster.computeIfAbsent(clusterName, - clusterName -> ConcurrentCollections.newConcurrentSet()); - } - - @Override - public void close() { - synchronized (activeNodesPerCluster) { - boolean found = getActiveNodesForCurrentCluster().remove(this); - assert found; - } - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index 5387a659aa2..2c8305b4e12 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -19,13 +19,10 @@ package org.elasticsearch.test.discovery; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -39,7 +36,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; @@ -59,7 +55,6 @@ public class TestZenDiscovery extends ZenDiscovery { /** A plugin which installs mock discovery and configures it to be used. 
*/ public static class TestPlugin extends Plugin implements DiscoveryPlugin { protected final Settings settings; - private final SetOnce unicastHostProvider = new SetOnce<>(); public TestPlugin(Settings settings) { this.settings = settings; } @@ -78,26 +73,6 @@ public class TestZenDiscovery extends ZenDiscovery { clusterApplier, clusterSettings, hostsProvider, allocationService)); } - @Override - public Map> getZenHostsProviders(TransportService transportService, - NetworkService networkService) { - final Supplier supplier; - if (USE_MOCK_PINGS.get(settings)) { - // we have to return something in order for the unicast host provider setting to resolve to something. It will never be used - supplier = () -> hostsResolver -> { - throw new UnsupportedOperationException(); - }; - } else { - supplier = () -> { - unicastHostProvider.set( - new MockUncasedHostProvider(transportService::getLocalNode, ClusterName.CLUSTER_NAME_SETTING.get(settings)) - ); - return unicastHostProvider.get(); - }; - } - return Collections.singletonMap("test-zen", supplier); - } - @Override public List> getSettings() { return Collections.singletonList(USE_MOCK_PINGS); @@ -107,18 +82,9 @@ public class TestZenDiscovery extends ZenDiscovery { public Settings additionalSettings() { return Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "test-zen") - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "test-zen") .putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()) .build(); } - - @Override - public void close() throws IOException { - super.close(); - if (unicastHostProvider.get() != null) { - unicastHostProvider.get().close(); - } - } } private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java new file mode 100644 index 00000000000..7e73e795b8a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.test.rest.ESRestTestCase; + +public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase { + + private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); + + public final boolean isRunningAgainstOldCluster() { + return runningAgainstOldCluster; + } + + private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + + public final Version getOldClusterVersion() { + return oldClusterVersion; + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveSnapshotsUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + @Override + protected boolean preserveClusterSettings() { + return true; + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle index d4fe9ee554c..e2c772d7088 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -47,7 +47,7 @@ followClusterTestCluster { setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' - setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.monitoring.collection.enabled', 'true' extraConfigFile 'roles.yml', 'roles.yml' setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index d8357a74e8e..43b16727aac 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -29,7 +29,9 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; public class FollowIndexSecurityIT extends ESRestTestCase { @@ -80,6 +82,7 @@ public class FollowIndexSecurityIT extends ESRestTestCase { createAndFollowIndex("leader_cluster:" + allowedIndex, allowedIndex); assertBusy(() -> verifyDocuments(client(), allowedIndex, numDocs)); assertThat(countCcrNodeTasks(), equalTo(1)); + assertBusy(() -> verifyCcrMonitoring(allowedIndex, allowedIndex)); assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); // Make sure that there are no other ccr related operations running: assertBusy(() -> { @@ -110,7 +113,7 @@ public class 
FollowIndexSecurityIT extends ESRestTestCase { e = expectThrows(ResponseException.class, () -> followIndex("leader_cluster:" + unallowedIndex, unallowedIndex)); - assertThat(e.getMessage(), containsString("follow index [" + unallowedIndex + "] does not exist")); + assertThat(e.getMessage(), containsString("action [indices:monitor/stats] is unauthorized for user [test_ccr]")); assertThat(indexExists(adminClient(), unallowedIndex), is(false)); assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); } @@ -203,4 +206,46 @@ public class FollowIndexSecurityIT extends ESRestTestCase { return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); } + private static void verifyCcrMonitoring(String expectedLeaderIndex, String expectedFollowerIndex) throws IOException { + ensureYellow(".monitoring-*"); + + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"leader_cluster:" + expectedLeaderIndex + "\"}}}"); + Map response = toMap(adminClient().performRequest(request)); + + int numberOfOperationsReceived = 0; + int numberOfOperationsIndexed = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), greaterThanOrEqualTo(1)); + + for (int i = 0; i < hits.size(); i++) { + Map hit = (Map) hits.get(i); + String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); + assertThat(leaderIndex, endsWith(expectedLeaderIndex)); + + final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); + assertThat(followerIndex, equalTo(expectedFollowerIndex)); + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); + numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); + int foundNumberOfOperationsIndexed = + (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); + numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + } + + assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); + assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + } + + private static void ensureYellow(String index) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + index); + request.addParameter("wait_for_status", "yellow"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("timeout", "70s"); + request.addParameter("level", "shards"); + adminClient().performRequest(request); + } + } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 396c247af40..b3b63723848 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -27,6 +27,7 @@ followClusterTestCluster { dependsOn leaderClusterTestRunner numNodes = 1 clusterName = 'follow-cluster' + setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 76d0e438135..5c1c3915044 100644 --- 
a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -24,7 +24,9 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class FollowIndexIT extends ESRestTestCase { @@ -75,6 +77,7 @@ public class FollowIndexIT extends ESRestTestCase { index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); } assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3)); + assertBusy(() -> verifyCcrMonitoring(leaderIndexName, followIndexName)); } } @@ -104,6 +107,7 @@ public class FollowIndexIT extends ESRestTestCase { ensureYellow("logs-20190101"); verifyDocuments("logs-20190101", 5); }); + assertBusy(() -> verifyCcrMonitoring("logs-20190101", "logs-20190101")); } private static void index(RestClient client, String index, String id, Object... fields) throws IOException { @@ -155,6 +159,39 @@ public class FollowIndexIT extends ESRestTestCase { } } + private static void verifyCcrMonitoring(final String expectedLeaderIndex, final String expectedFollowerIndex) throws IOException { + ensureYellow(".monitoring-*"); + + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"ccr_stats.leader_index\": \"leader_cluster:" + expectedLeaderIndex + "\"}}}"); + Map response = toMap(client().performRequest(request)); + + int numberOfOperationsReceived = 0; + int numberOfOperationsIndexed = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), greaterThanOrEqualTo(1)); + + for (int i = 0; i < hits.size(); i++) { + Map hit = (Map) hits.get(i); + String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); + assertThat(leaderIndex, endsWith(expectedLeaderIndex)); + + final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); + assertThat(followerIndex, equalTo(expectedFollowerIndex)); + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); + numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); + int foundNumberOfOperationsIndexed = + (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); + numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + } + + assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); + assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + } + private static Map toMap(Response response) throws IOException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 353a66db263..4e4caf8500f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -40,19 +40,17 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; -import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.FollowIndexAction; -import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportCreateAndFollowIndexAction; import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.TransportFollowIndexAction; import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.ccr.action.TransportUnfollowIndexAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; @@ -63,6 +61,11 @@ import org.elasticsearch.xpack.ccr.rest.RestFollowIndexAction; import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowIndexAction; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; import java.util.Arrays; import java.util.Collection; @@ -73,8 +76,8 @@ import java.util.Optional; import java.util.function.Supplier; import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_ENABLED_SETTING; import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_FOLLOWING_INDEX_SETTING; +import static org.elasticsearch.xpack.core.XPackSettings.CCR_ENABLED_SETTING; /** * Container class for CCR functionality. 
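The next hunk rewires these registrations so that each action definition now living in x-pack core is paired with its transport implementation. For readers unfamiliar with the mechanism, a minimal sketch of how an ActionPlugin declares such a pairing; MyPlugin, MyAction and TransportMyAction are placeholder names, not part of this change:

import java.util.Collections;
import java.util.List;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
        // the handler binds the action's name and request/response types to the
        // transport-layer class that executes it (MyAction/TransportMyAction are hypothetical)
        return Collections.singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }
}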
@@ -82,6 +85,8 @@ import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_FOLLOWING_INDEX_SETTIN public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin { public static final String CCR_THREAD_POOL_NAME = "ccr"; + public static final String CCR_CUSTOM_METADATA_KEY = "ccr"; + public static final String CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS = "leader_index_shard_history_uuids"; private final boolean enabled; private final Settings settings; @@ -148,9 +153,9 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E // stats action new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), // follow actions - new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, CreateAndFollowIndexAction.TransportAction.class), - new ActionHandler<>(FollowIndexAction.INSTANCE, FollowIndexAction.TransportAction.class), - new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class), + new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, TransportCreateAndFollowIndexAction.class), + new ActionHandler<>(FollowIndexAction.INSTANCE, TransportFollowIndexAction.class), + new ActionHandler<>(UnfollowIndexAction.INSTANCE, TransportUnfollowIndexAction.class), // auto-follow actions new ActionHandler<>(DeleteAutoFollowPatternAction.INSTANCE, TransportDeleteAutoFollowPatternAction.class), new ActionHandler<>(PutAutoFollowPatternAction.INSTANCE, TransportPutAutoFollowPatternAction.class)); @@ -160,6 +165,10 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster) { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( // stats API new RestCcrStatsAction(settings, restController), @@ -179,8 +188,8 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E ShardFollowTask::new), // Task statuses - new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.STATUS_PARSER_NAME, - ShardFollowNodeTask.Status::new) + new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTaskStatus.STATUS_PARSER_NAME, + ShardFollowNodeTaskStatus::new) ); } @@ -192,9 +201,9 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E // Task statuses new NamedXContentRegistry.Entry( - ShardFollowNodeTask.Status.class, - new ParseField(ShardFollowNodeTask.Status.STATUS_PARSER_NAME), - ShardFollowNodeTask.Status::fromXContent)); + ShardFollowNodeTaskStatus.class, + new ParseField(ShardFollowNodeTaskStatus.STATUS_PARSER_NAME), + ShardFollowNodeTaskStatus::fromXContent)); } /** @@ -225,10 +234,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E return Collections.emptyList(); } - FixedExecutorBuilder ccrTp = new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, - 32, 100, "xpack.ccr.ccr_thread_pool"); - - return Collections.singletonList(ccrTp); + return Collections.singletonList(new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, 32, 100, "xpack.ccr.ccr_thread_pool")); } protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index f9a5d8fe830..2161d0a1423 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -10,9 +10,18 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; @@ -21,6 +30,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.util.Collections; import java.util.Locale; import java.util.Objects; +import java.util.function.BiConsumer; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; @@ -58,23 +68,24 @@ public final class CcrLicenseChecker { } /** - * Fetches the leader index metadata from the remote cluster. Before fetching the index metadata, the remote cluster is checked for - * license compatibility with CCR. If the remote cluster is not licensed for CCR, the {@code onFailure} consumer is is invoked. - * Otherwise, the specified consumer is invoked with the leader index metadata fetched from the remote cluster. + * Fetches the leader index metadata and history UUIDs for leader index shards from the remote cluster. + * Before fetching the index metadata, the remote cluster is checked for license compatibility with CCR. + * If the remote cluster is not licensed for CCR, the {@code onFailure} consumer is invoked. Otherwise, + * the specified consumer is invoked with the leader index metadata fetched from the remote cluster.
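+ * The per-shard history UUIDs are read from each primary shard's commit user data via the leader cluster's indices stats API.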
* - * @param client the client - * @param clusterAlias the remote cluster alias - * @param leaderIndex the name of the leader index - * @param onFailure the failure consumer - * @param leaderIndexMetadataConsumer the leader index metadata consumer - * @param the type of response the listener is waiting for + * @param client the client + * @param clusterAlias the remote cluster alias + * @param leaderIndex the name of the leader index + * @param onFailure the failure consumer + * @param consumer the consumer for supplying the leader index metadata and historyUUIDs of all leader shards + * @param the type of response the listener is waiting for */ - public void checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + public void checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( final Client client, final String clusterAlias, final String leaderIndex, final Consumer onFailure, - final Consumer leaderIndexMetadataConsumer) { + final BiConsumer consumer) { final ClusterStateRequest request = new ClusterStateRequest(); request.clear(); @@ -85,7 +96,13 @@ public final class CcrLicenseChecker { clusterAlias, request, onFailure, - leaderClusterState -> leaderIndexMetadataConsumer.accept(leaderClusterState.getMetaData().index(leaderIndex)), + leaderClusterState -> { + IndexMetaData leaderIndexMetaData = leaderClusterState.getMetaData().index(leaderIndex); + final Client leaderClient = client.getRemoteClusterClient(clusterAlias); + fetchLeaderHistoryUUIDs(leaderClient, leaderIndexMetaData, onFailure, historyUUIDs -> { + consumer.accept(historyUUIDs, leaderIndexMetaData); + }); + }, licenseCheck -> indexMetadataNonCompliantRemoteLicense(leaderIndex, licenseCheck), e -> indexMetadataUnknownRemoteLicense(leaderIndex, clusterAlias, e)); } @@ -168,6 +185,58 @@ public final class CcrLicenseChecker { }); } + /** + * Fetches the history UUIDs for the leader index on a per-shard basis using the specified leaderClient. + * + * @param leaderClient the leader client + * @param leaderIndexMetaData the leader index metadata + * @param onFailure the failure consumer + * @param historyUUIDConsumer the consumer that receives the leader index history UUIDs + */ + // NOTE: placed this method here in order to avoid duplication of logic for fetching history UUIDs + // in case of following a local or a remote cluster.
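+ // The UUIDs are returned as an array indexed by shard id; any slot still null after the stats call + // (for example, a primary with missing commit stats) is reported through onFailure rather than the consumer.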
+ public void fetchLeaderHistoryUUIDs( + final Client leaderClient, + final IndexMetaData leaderIndexMetaData, + final Consumer onFailure, + final Consumer historyUUIDConsumer) { + + String leaderIndex = leaderIndexMetaData.getIndex().getName(); + CheckedConsumer indicesStatsHandler = indicesStatsResponse -> { + IndexStats indexStats = indicesStatsResponse.getIndices().get(leaderIndex); + String[] historyUUIDs = new String[leaderIndexMetaData.getNumberOfShards()]; + for (IndexShardStats indexShardStats : indexStats) { + for (ShardStats shardStats : indexShardStats) { + // Ignore replica shards as they may not have yet started and + // we just end up overwriting slots in historyUUIDs + if (shardStats.getShardRouting().primary() == false) { + continue; + } + + CommitStats commitStats = shardStats.getCommitStats(); + if (commitStats == null) { + onFailure.accept(new IllegalArgumentException("leader index's commit stats are missing")); + return; + } + String historyUUID = commitStats.getUserData().get(Engine.HISTORY_UUID_KEY); + ShardId shardId = shardStats.getShardRouting().shardId(); + historyUUIDs[shardId.id()] = historyUUID; + } + } + for (int i = 0; i < historyUUIDs.length; i++) { + if (historyUUIDs[i] == null) { + onFailure.accept(new IllegalArgumentException("no history uuid for [" + leaderIndex + "][" + i + "]")); + return; + } + } + historyUUIDConsumer.accept(historyUUIDs); + }; + IndicesStatsRequest request = new IndicesStatsRequest(); + request.clear(); + request.indices(leaderIndex); + leaderClient.admin().indices().stats(request, ActionListener.wrap(indicesStatsHandler, onFailure)); + } + private static ElasticsearchStatusException indexMetadataNonCompliantRemoteLicense( final String leaderIndex, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { final String clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index a942990ea5a..122f5a913d2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.Arrays; import java.util.List; @@ -22,11 +23,6 @@ public final class CcrSettings { } - /** - * Setting for controlling whether or not CCR is enabled. - */ - static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); - /** * Index setting for a following index. 
*/ @@ -46,7 +42,7 @@ public final class CcrSettings { */ static List> getSettings() { return Arrays.asList( - CCR_ENABLED_SETTING, + XPackSettings.CCR_ENABLED_SETTING, CCR_FOLLOWING_INDEX_SETTING, CCR_AUTO_FOLLOW_POLL_INTERVAL); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index e28214341a9..722cbddde18 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -27,6 +27,8 @@ import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; import java.util.HashMap; @@ -216,7 +218,7 @@ public class AutoFollowCoordinator implements ClusterStateApplier { new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName, autoFollowPattern.getMaxBatchOperationCount(), autoFollowPattern.getMaxConcurrentReadBatches(), autoFollowPattern.getMaxOperationSizeInBytes(), autoFollowPattern.getMaxConcurrentWriteBatches(), - autoFollowPattern.getMaxWriteBufferSize(), autoFollowPattern.getRetryTimeout(), + autoFollowPattern.getMaxWriteBufferSize(), autoFollowPattern.getMaxRetryDelay(), autoFollowPattern.getIdleShardRetryDelay()); // Execute if the create and follow api call succeeds: diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java deleted file mode 100644 index 223f6ed8e6d..00000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.ccr.action; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.ActiveShardsObserver; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteClusterService; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.ccr.CcrLicenseChecker; -import org.elasticsearch.xpack.ccr.CcrSettings; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -public class CreateAndFollowIndexAction extends Action { - - public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); - public static final String NAME = "indices:admin/xpack/ccr/create_and_follow_index"; - - private CreateAndFollowIndexAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); - } - - public static class Request extends AcknowledgedRequest implements IndicesRequest { - - private FollowIndexAction.Request followRequest; - - public Request(FollowIndexAction.Request followRequest) { - this.followRequest = Objects.requireNonNull(followRequest); - } - - Request() { - } - - public FollowIndexAction.Request getFollowRequest() { - return followRequest; - } - - @Override - public ActionRequestValidationException validate() { - return followRequest.validate(); - } - - @Override - public String[] indices() { - return new String[]{followRequest.getFollowerIndex()}; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - 
followRequest = new FollowIndexAction.Request(); - followRequest.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - followRequest.writeTo(out); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Request request = (Request) o; - return Objects.equals(followRequest, request.followRequest); - } - - @Override - public int hashCode() { - return Objects.hash(followRequest); - } - } - - public static class Response extends ActionResponse implements ToXContentObject { - - private boolean followIndexCreated; - private boolean followIndexShardsAcked; - private boolean indexFollowingStarted; - - Response() { - } - - Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { - this.followIndexCreated = followIndexCreated; - this.followIndexShardsAcked = followIndexShardsAcked; - this.indexFollowingStarted = indexFollowingStarted; - } - - public boolean isFollowIndexCreated() { - return followIndexCreated; - } - - public boolean isFollowIndexShardsAcked() { - return followIndexShardsAcked; - } - - public boolean isIndexFollowingStarted() { - return indexFollowingStarted; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - followIndexCreated = in.readBoolean(); - followIndexShardsAcked = in.readBoolean(); - indexFollowingStarted = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(followIndexCreated); - out.writeBoolean(followIndexShardsAcked); - out.writeBoolean(indexFollowingStarted); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.field("follow_index_created", followIndexCreated); - builder.field("follow_index_shards_acked", followIndexShardsAcked); - builder.field("index_following_started", indexFollowingStarted); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Response response = (Response) o; - return followIndexCreated == response.followIndexCreated && - followIndexShardsAcked == response.followIndexShardsAcked && - indexFollowingStarted == response.indexFollowingStarted; - } - - @Override - public int hashCode() { - return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); - } - } - - public static class TransportAction extends TransportMasterNodeAction { - - private final Client client; - private final AllocationService allocationService; - private final RemoteClusterService remoteClusterService; - private final ActiveShardsObserver activeShardsObserver; - private final CcrLicenseChecker ccrLicenseChecker; - - @Inject - public TransportAction( - final Settings settings, - final ThreadPool threadPool, - final TransportService transportService, - final ClusterService clusterService, - final ActionFilters actionFilters, - final IndexNameExpressionResolver indexNameExpressionResolver, - final Client client, - final AllocationService allocationService, - final CcrLicenseChecker ccrLicenseChecker) { - super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new); - this.client = client; - this.allocationService = 
allocationService; - this.remoteClusterService = transportService.getRemoteClusterService(); - this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); - this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected Response newResponse() { - return new Response(); - } - - @Override - protected void masterOperation( - final Request request, final ClusterState state, final ActionListener listener) throws Exception { - if (ccrLicenseChecker.isCcrAllowed() == false) { - listener.onFailure(LicenseUtils.newComplianceException("ccr")); - return; - } - final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; - final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - createFollowerIndexAndFollowLocalIndex(request, state, listener); - } else { - assert remoteClusterIndices.size() == 1; - final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - final String clusterAlias = entry.getKey(); - final String leaderIndex = entry.getValue().get(0); - createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); - } - } - - private void createFollowerIndexAndFollowLocalIndex( - final Request request, final ClusterState state, final ActionListener listener) { - // following an index in local cluster, so use local cluster state to fetch leader index metadata - final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); - createFollowerIndex(leaderIndexMetadata, request, listener); - } - - private void createFollowerIndexAndFollowRemoteIndex( - final Request request, - final String clusterAlias, - final String leaderIndex, - final ActionListener listener) { - ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( - client, - clusterAlias, - leaderIndex, - listener::onFailure, - leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); - } - - private void createFollowerIndex( - final IndexMetaData leaderIndexMetaData, final Request request, final ActionListener listener) { - if (leaderIndexMetaData == null) { - listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + - "] does not exist")); - return; - } - - ActionListener handler = ActionListener.wrap( - result -> { - if (result) { - initiateFollowing(request, listener); - } else { - listener.onResponse(new Response(true, false, false)); - } - }, - listener::onFailure); - // Can't use create index api here, because then index templates can alter the mappings / settings. - // And index templates could introduce settings / mappings that are incompatible with the leader index. 
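The deleted transport action builds the follower's IndexMetaData inside a cluster-state update task instead of calling the create-index API, precisely so that index templates cannot inject settings or mappings that diverge from the leader. A condensed sketch of the copy-and-override pattern the task applies (all identifiers are taken from the hunk below):

    // Start from a verbatim copy of the leader's settings, then overwrite only
    // the identifiers that must differ between leader and follower.
    Settings.Builder settingsBuilder = Settings.builder()
        .put(leaderIndexMetaData.getSettings())                           // copy everything from the leader
        .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())  // fresh UUID, so an index in the same cluster can be followed
        .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex)      // the follower's own name
        .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);     // flag the index as a follower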
- clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { - - @Override - protected Boolean newResponse(boolean acknowledged) { - return acknowledged; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - String followIndex = request.getFollowRequest().getFollowerIndex(); - IndexMetaData currentIndex = currentState.metaData().index(followIndex); - if (currentIndex != null) { - throw new ResourceAlreadyExistsException(currentIndex.getIndex()); - } - - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); - - // Copy all settings, but overwrite a few settings. - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(leaderIndexMetaData.getSettings()); - // Overwriting UUID here, because otherwise we can't follow indices in the same cluster - settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); - settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - imdBuilder.settings(settingsBuilder); - - // Copy mappings from leader IMD to follow IMD - for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { - imdBuilder.putMapping(cursor.value); - } - imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); - IndexMetaData followIMD = imdBuilder.build(); - mdBuilder.put(followIMD, false); - - ClusterState.Builder builder = ClusterState.builder(currentState); - builder.metaData(mdBuilder.build()); - ClusterState updatedState = builder.build(); - - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); - updatedState = allocationService.reroute( - ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), - "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); - - logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", - followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); - - return updatedState; - } - }); - } - - private void initiateFollowing(Request request, ActionListener listener) { - activeShardsObserver.waitForActiveShards(new String[]{request.followRequest.getFollowerIndex()}, - ActiveShardCount.DEFAULT, request.timeout(), result -> { - if (result) { - client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( - r -> listener.onResponse(new Response(true, true, r.isAcknowledged())), - listener::onFailure - )); - } else { - listener.onResponse(new Response(true, false, false)); - } - }, listener::onFailure); - } - - @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); - } - - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java deleted file mode 100644 index 49822455110..00000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java +++ /dev/null @@ -1,571 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ccr.action; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexingSlowLog; -import org.elasticsearch.index.SearchSlowLog; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesRequestCache; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteClusterService; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.ccr.CcrLicenseChecker; -import org.elasticsearch.xpack.ccr.CcrSettings; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.stream.Collectors; - -public class FollowIndexAction extends Action { - - public static final FollowIndexAction INSTANCE = new FollowIndexAction(); - public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; - - private FollowIndexAction() { - super(NAME); - } - - @Override - public AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - public static class Request extends ActionRequest implements ToXContentObject { - - private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); - private static 
final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, - (args, followerIndex) -> { - if (args[1] != null) { - followerIndex = (String) args[1]; - } - return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], - (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); - }); - - static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_READ_BATCHES); - PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_WRITE_BUFFER_SIZE); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.RETRY_TIMEOUT.getPreferredName()), - ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName()), - ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); - } - - public static Request fromXContent(XContentParser parser, String followerIndex) throws IOException { - Request request = PARSER.parse(parser, followerIndex); - if (followerIndex != null) { - if (request.followerIndex == null) { - request.followerIndex = followerIndex; - } else { - if (request.followerIndex.equals(followerIndex) == false) { - throw new IllegalArgumentException("provided follower_index is not equal"); - } - } - } - return request; - } - - private String leaderIndex; - private String followerIndex; - private int maxBatchOperationCount; - private int maxConcurrentReadBatches; - private long maxOperationSizeInBytes; - private int maxConcurrentWriteBatches; - private int maxWriteBufferSize; - private TimeValue retryTimeout; - private TimeValue idleShardRetryDelay; - - public Request( - String leaderIndex, - String followerIndex, - Integer maxBatchOperationCount, - Integer maxConcurrentReadBatches, - Long maxOperationSizeInBytes, - Integer maxConcurrentWriteBatches, - Integer maxWriteBufferSize, - TimeValue retryTimeout, - TimeValue idleShardRetryDelay) { - - if (leaderIndex == null) { - throw new IllegalArgumentException("leader_index is missing"); - } - if (followerIndex == null) { - throw new IllegalArgumentException("follower_index is missing"); - } - if (maxBatchOperationCount == null) { - maxBatchOperationCount = ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT; - } - if (maxConcurrentReadBatches == null) { - maxConcurrentReadBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES; - } - if (maxOperationSizeInBytes == null) { - maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; - } - if (maxConcurrentWriteBatches == null) { - maxConcurrentWriteBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; - } - if 
(maxWriteBufferSize == null) { - maxWriteBufferSize = ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE; - } - if (retryTimeout == null) { - retryTimeout = ShardFollowNodeTask.DEFAULT_RETRY_TIMEOUT; - } - if (idleShardRetryDelay == null) { - idleShardRetryDelay = ShardFollowNodeTask.DEFAULT_IDLE_SHARD_RETRY_DELAY; - } - - if (maxBatchOperationCount < 1) { - throw new IllegalArgumentException("maxBatchOperationCount must be larger than 0"); - } - if (maxConcurrentReadBatches < 1) { - throw new IllegalArgumentException("concurrent_processors must be larger than 0"); - } - if (maxOperationSizeInBytes <= 0) { - throw new IllegalArgumentException("processor_max_translog_bytes must be larger than 0"); - } - if (maxConcurrentWriteBatches < 1) { - throw new IllegalArgumentException("maxConcurrentWriteBatches must be larger than 0"); - } - if (maxWriteBufferSize < 1) { - throw new IllegalArgumentException("maxWriteBufferSize must be larger than 0"); - } - - this.leaderIndex = leaderIndex; - this.followerIndex = followerIndex; - this.maxBatchOperationCount = maxBatchOperationCount; - this.maxConcurrentReadBatches = maxConcurrentReadBatches; - this.maxOperationSizeInBytes = maxOperationSizeInBytes; - this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; - this.maxWriteBufferSize = maxWriteBufferSize; - this.retryTimeout = retryTimeout; - this.idleShardRetryDelay = idleShardRetryDelay; - } - - Request() { - } - - public String getLeaderIndex() { - return leaderIndex; - } - - public String getFollowerIndex() { - return followerIndex; - } - - public int getMaxBatchOperationCount() { - return maxBatchOperationCount; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - leaderIndex = in.readString(); - followerIndex = in.readString(); - maxBatchOperationCount = in.readVInt(); - maxConcurrentReadBatches = in.readVInt(); - maxOperationSizeInBytes = in.readVLong(); - maxConcurrentWriteBatches = in.readVInt(); - maxWriteBufferSize = in.readVInt(); - retryTimeout = in.readOptionalTimeValue(); - idleShardRetryDelay = in.readOptionalTimeValue(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(leaderIndex); - out.writeString(followerIndex); - out.writeVInt(maxBatchOperationCount); - out.writeVInt(maxConcurrentReadBatches); - out.writeVLong(maxOperationSizeInBytes); - out.writeVInt(maxConcurrentWriteBatches); - out.writeVInt(maxWriteBufferSize); - out.writeOptionalTimeValue(retryTimeout); - out.writeOptionalTimeValue(idleShardRetryDelay); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); - builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); - builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); - builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); - builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); - builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), 
retryTimeout.getStringRep()); - builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Request request = (Request) o; - return maxBatchOperationCount == request.maxBatchOperationCount && - maxConcurrentReadBatches == request.maxConcurrentReadBatches && - maxOperationSizeInBytes == request.maxOperationSizeInBytes && - maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && - maxWriteBufferSize == request.maxWriteBufferSize && - Objects.equals(retryTimeout, request.retryTimeout) && - Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && - Objects.equals(leaderIndex, request.leaderIndex) && - Objects.equals(followerIndex, request.followerIndex); - } - - @Override - public int hashCode() { - return Objects.hash( - leaderIndex, - followerIndex, - maxBatchOperationCount, - maxConcurrentReadBatches, - maxOperationSizeInBytes, - maxConcurrentWriteBatches, - maxWriteBufferSize, - retryTimeout, - idleShardRetryDelay - ); - } - } - - public static class TransportAction extends HandledTransportAction { - - private final Client client; - private final ThreadPool threadPool; - private final ClusterService clusterService; - private final RemoteClusterService remoteClusterService; - private final PersistentTasksService persistentTasksService; - private final IndicesService indicesService; - private final CcrLicenseChecker ccrLicenseChecker; - - @Inject - public TransportAction( - final Settings settings, - final ThreadPool threadPool, - final TransportService transportService, - final ActionFilters actionFilters, - final Client client, - final ClusterService clusterService, - final PersistentTasksService persistentTasksService, - final IndicesService indicesService, - final CcrLicenseChecker ccrLicenseChecker) { - super(settings, NAME, transportService, actionFilters, Request::new); - this.client = client; - this.threadPool = threadPool; - this.clusterService = clusterService; - this.remoteClusterService = transportService.getRemoteClusterService(); - this.persistentTasksService = persistentTasksService; - this.indicesService = indicesService; - this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - } - - @Override - protected void doExecute(final Task task, - final Request request, - final ActionListener listener) { - if (ccrLicenseChecker.isCcrAllowed() == false) { - listener.onFailure(LicenseUtils.newComplianceException("ccr")); - return; - } - final String[] indices = new String[]{request.leaderIndex}; - final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - followLocalIndex(request, listener); - } else { - assert remoteClusterIndices.size() == 1; - final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - final String clusterAlias = entry.getKey(); - final String leaderIndex = entry.getValue().get(0); - followRemoteIndex(request, clusterAlias, leaderIndex, listener); - } - } - - private void followLocalIndex(final Request request, - final ActionListener listener) { - final ClusterState state = clusterService.state(); - final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); - // following an 
index in local cluster, so use local cluster state to fetch leader index metadata - final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); - try { - start(request, null, leaderIndexMetadata, followerIndexMetadata, listener); - } catch (final IOException e) { - listener.onFailure(e); - } - } - - private void followRemoteIndex( - final Request request, - final String clusterAlias, - final String leaderIndex, - final ActionListener listener) { - final ClusterState state = clusterService.state(); - final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); - ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( - client, - clusterAlias, - leaderIndex, - listener::onFailure, - leaderIndexMetadata -> { - try { - start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); - } catch (final IOException e) { - listener.onFailure(e); - } - }); - } - - /** - * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then - * creates a persistent task for each leader primary shard. This persistent tasks track changes in the leader - * shard and replicate these changes to a follower shard. - * - * Currently the following validation is performed: - *
* <ul> - *     <li>The leader index and follow index need to have the same number of primary shards</li> - * </ul>
- */ - void start( - Request request, - String clusterNameAlias, - IndexMetaData leaderIndexMetadata, - IndexMetaData followIndexMetadata, - ActionListener<AcknowledgedResponse> handler) throws IOException { - - MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; - validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); - final int numShards = followIndexMetadata.getNumberOfShards(); - final AtomicInteger counter = new AtomicInteger(numShards); - final AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); - Map<String, String> filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() - .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - for (int i = 0; i < numShards; i++) { - final int shardId = i; - String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; - - ShardFollowTask shardFollowTask = new ShardFollowTask(clusterNameAlias, - new ShardId(followIndexMetadata.getIndex(), shardId), - new ShardId(leaderIndexMetadata.getIndex(), shardId), - request.maxBatchOperationCount, request.maxConcurrentReadBatches, request.maxOperationSizeInBytes, - request.maxConcurrentWriteBatches, request.maxWriteBufferSize, request.retryTimeout, - request.idleShardRetryDelay, filteredHeaders); - persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, - new ActionListener<PersistentTasksCustomMetaData.PersistentTask<ShardFollowTask>>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask<ShardFollowTask> task) { - responses.set(shardId, task); - finalizeResponse(); - } - - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } - - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } - - if (error == null) { - // include task ids?
- handler.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - handler.onFailure(error); - } - } - } - } - ); - } - } - } - - private static final Set> WHITELISTED_SETTINGS; - - static { - Set> whiteListedSettings = new HashSet<>(); - whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); - - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); - whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); - - whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); - whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); - whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); - whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); - whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); - whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); - - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); - - whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - - WHITELISTED_SETTINGS = 
Collections.unmodifiableSet(whiteListedSettings); - } - - static void validate(Request request, - IndexMetaData leaderIndex, - IndexMetaData followIndex, MapperService followerMapperService) { - if (leaderIndex == null) { - throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not exist"); - } - if (followIndex == null) { - throw new IllegalArgumentException("follow index [" + request.followerIndex + "] does not exist"); - } - if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { - throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not have soft deletes enabled"); - } - if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { - throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + - "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]"); - } - if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { - throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + - "] does not match with the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); - } - if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { - throw new IllegalArgumentException("leader and follow index must be open"); - } - if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { - throw new IllegalArgumentException("the following index [" + request.followerIndex + "] is not ready " + - "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); - } - // Make a copy, remove settings that are allowed to be different and then compare if the settings are equal. - Settings leaderSettings = filter(leaderIndex.getSettings()); - Settings followerSettings = filter(followIndex.getSettings()); - if (leaderSettings.equals(followerSettings) == false) { - throw new IllegalArgumentException("the leader and follower index settings must be identical"); - } - - // Validates if the current follower mapping is mergable with the leader mapping. 
- // This also validates for example whether specific mapper plugins have been installed - followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); - } - - private static Settings filter(Settings originalSettings) { - Settings.Builder settings = Settings.builder().put(originalSettings); - // Remove settings that are always going to be different between leader and follow index: - settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); - settings.remove(IndexMetaData.SETTING_INDEX_UUID); - settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); - settings.remove(IndexMetaData.SETTING_CREATION_DATE); - - Iterator iterator = settings.keys().iterator(); - while (iterator.hasNext()) { - String key = iterator.next(); - for (Setting whitelistedSetting : WHITELISTED_SETTINGS) { - if (whitelistedSetting.match(key)) { - iterator.remove(); - break; - } - } - } - return settings.build(); - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index d102c6b5b7a..eef3671d516 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; import java.util.ArrayList; @@ -57,11 +58,13 @@ public class ShardChangesAction extends Action { private long fromSeqNo; private int maxOperationCount; private ShardId shardId; - private long maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + private String expectedHistoryUUID; + private long maxOperationSizeInBytes = FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; - public Request(ShardId shardId) { + public Request(ShardId shardId, String expectedHistoryUUID) { super(shardId.getIndexName()); this.shardId = shardId; + this.expectedHistoryUUID = expectedHistoryUUID; } Request() { @@ -95,6 +98,10 @@ public class ShardChangesAction extends Action { this.maxOperationSizeInBytes = maxOperationSizeInBytes; } + public String getExpectedHistoryUUID() { + return expectedHistoryUUID; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -118,6 +125,7 @@ public class ShardChangesAction extends Action { fromSeqNo = in.readVLong(); maxOperationCount = in.readVInt(); shardId = ShardId.readShardId(in); + expectedHistoryUUID = in.readString(); maxOperationSizeInBytes = in.readVLong(); } @@ -127,6 +135,7 @@ public class ShardChangesAction extends Action { out.writeVLong(fromSeqNo); out.writeVInt(maxOperationCount); shardId.writeTo(out); + out.writeString(expectedHistoryUUID); out.writeVLong(maxOperationSizeInBytes); } @@ -139,12 +148,13 @@ public class ShardChangesAction extends Action { return fromSeqNo == request.fromSeqNo && maxOperationCount == request.maxOperationCount && Objects.equals(shardId, request.shardId) && + Objects.equals(expectedHistoryUUID, request.expectedHistoryUUID) && maxOperationSizeInBytes == request.maxOperationSizeInBytes; } @Override public int hashCode() { - return Objects.hash(fromSeqNo, maxOperationCount, shardId, 
maxOperationSizeInBytes); + return Objects.hash(fromSeqNo, maxOperationCount, shardId, expectedHistoryUUID, maxOperationSizeInBytes); } @Override @@ -153,6 +163,7 @@ public class ShardChangesAction extends Action { "fromSeqNo=" + fromSeqNo + ", maxOperationCount=" + maxOperationCount + ", shardId=" + shardId + + ", expectedHistoryUUID=" + expectedHistoryUUID + ", maxOperationSizeInBytes=" + maxOperationSizeInBytes + '}'; } @@ -188,7 +199,12 @@ public class ShardChangesAction extends Action { Response() { } - Response(final long mappingVersion, final long globalCheckpoint, final long maxSeqNo, final Translog.Operation[] operations) { + Response( + final long mappingVersion, + final long globalCheckpoint, + final long maxSeqNo, + final Translog.Operation[] operations) { + this.mappingVersion = mappingVersion; this.globalCheckpoint = globalCheckpoint; this.maxSeqNo = maxSeqNo; @@ -259,6 +275,7 @@ public class ShardChangesAction extends Action { seqNoStats.getGlobalCheckpoint(), request.fromSeqNo, request.maxOperationCount, + request.expectedHistoryUUID, request.maxOperationSizeInBytes); return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); } @@ -292,11 +309,20 @@ public class ShardChangesAction extends Action { * Also if the sum of collected operations' size is above the specified maxOperationSizeInBytes then this method * stops collecting more operations and returns what has been collected so far. */ - static Translog.Operation[] getOperations(IndexShard indexShard, long globalCheckpoint, long fromSeqNo, int maxOperationCount, + static Translog.Operation[] getOperations(IndexShard indexShard, + long globalCheckpoint, + long fromSeqNo, + int maxOperationCount, + String expectedHistoryUUID, long maxOperationSizeInBytes) throws IOException { if (indexShard.state() != IndexShardState.STARTED) { throw new IndexShardNotStartedException(indexShard.shardId(), indexShard.state()); } + final String historyUUID = indexShard.getHistoryUUID(); + if (historyUUID.equals(expectedHistoryUUID) == false) { + throw new IllegalStateException("unexpected history uuid, expected [" + expectedHistoryUUID + "], actual [" + + historyUUID + "]"); + } if (fromSeqNo > globalCheckpoint) { return EMPTY_OPERATIONS_ARRAY; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 00e3aaaae2a..f88f21e4072 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -10,35 +10,23 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.transport.NetworkExceptionHelper; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import 
org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import java.io.IOException; -import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.NavigableMap; -import java.util.Objects; import java.util.PriorityQueue; import java.util.Queue; import java.util.TreeMap; @@ -48,7 +36,6 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; import java.util.function.LongSupplier; -import java.util.stream.Collectors; /** * The node task that fetch the write operations from a leader shard and @@ -56,20 +43,12 @@ import java.util.stream.Collectors; */ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { - public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; - public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; - public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; - public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; - public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; - private static final int RETRY_LIMIT = 10; - public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500); - public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); - + private static final int DELAY_MILLIS = 50; private static final Logger LOGGER = Loggers.getLogger(ShardFollowNodeTask.class); private final String leaderIndex; private final ShardFollowTask params; - private final TimeValue retryTimeout; + private final TimeValue maxRetryDelay; private final TimeValue idleShardChangesRequestDelay; private final BiConsumer scheduler; private final LongSupplier relativeTimeProvider; @@ -101,7 +80,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { this.params = params; this.scheduler = scheduler; this.relativeTimeProvider = relativeTimeProvider; - this.retryTimeout = params.getRetryTimeout(); + this.maxRetryDelay = params.getMaxRetryDelay(); this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay(); /* * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of @@ -379,20 +358,28 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) { assert e != null; - if (shouldRetry(e)) { - if (isStopped() == false && retryCounter.incrementAndGet() <= RETRY_LIMIT) { - LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying...", params.getFollowShardId()), e); - scheduler.accept(retryTimeout, task); - } else { - markAsFailed(new ElasticsearchException("retrying failed [" + retryCounter.get() + - "] times, aborting...", e)); - } + if (shouldRetry(e) && isStopped() == false) { + int currentRetry = retryCounter.incrementAndGet(); + LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", + 
params.getFollowShardId(), currentRetry), e); + long delay = computeDelay(currentRetry, maxRetryDelay.getMillis()); + scheduler.accept(TimeValue.timeValueMillis(delay), task); } else { markAsFailed(e); } } - private boolean shouldRetry(Exception e) { + static long computeDelay(int currentRetry, long maxRetryDelayInMillis) { + // Cap currentRetry to avoid overflow when computing n variable + int maxCurrentRetry = Math.min(currentRetry, 24); + long n = Math.round(Math.pow(2, maxCurrentRetry - 1)); + // + 1 here, because nextInt(...) bound is exclusive and otherwise the first delay would always be zero. + int k = Randomness.get().nextInt(Math.toIntExact(n + 1)); + int backOffDelay = k * DELAY_MILLIS; + return Math.min(backOffDelay, maxRetryDelayInMillis); + } + + private static boolean shouldRetry(Exception e) { return NetworkExceptionHelper.isConnectException(e) || NetworkExceptionHelper.isCloseConnectionException(e) || TransportActions.isShardNotAvailableException(e); @@ -421,7 +408,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { } @Override - public synchronized Status getStatus() { + public synchronized ShardFollowNodeTaskStatus getStatus() { final long timeSinceLastFetchMillis; if (lastFetchTime != -1) { timeSinceLastFetchMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - lastFetchTime); @@ -429,8 +416,9 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { // To avoid confusion when ccr didn't yet execute a fetch: timeSinceLastFetchMillis = -1; } - return new Status( + return new ShardFollowNodeTaskStatus( leaderIndex, + params.getFollowShardId().getIndexName(), getFollowShardId().getId(), leaderGlobalCheckpoint, leaderMaxSeqNo, @@ -454,476 +442,4 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { timeSinceLastFetchMillis); } - public static class Status implements Task.Status { - - public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; - - static final ParseField LEADER_INDEX = new ParseField("leader_index"); - static final ParseField SHARD_ID = new ParseField("shard_id"); - static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); - static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no"); - static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); - static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no"); - static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no"); - static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); - static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); - static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); - static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); - static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); - static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); - static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); - static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received"); - static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes"); - static 
final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis"); - static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); - static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); - static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); - static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); - static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser STATUS_PARSER = new ConstructingObjectParser<>(STATUS_PARSER_NAME, - args -> new Status( - (String) args[0], - (int) args[1], - (long) args[2], - (long) args[3], - (long) args[4], - (long) args[5], - (long) args[6], - (int) args[7], - (int) args[8], - (int) args[9], - (long) args[10], - (long) args[11], - (long) args[12], - (long) args[13], - (long) args[14], - (long) args[15], - (long) args[16], - (long) args[17], - (long) args[18], - (long) args[19], - new TreeMap<>( - ((List>) args[20]) - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), - (long) args[21])); - - public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; - - static final ConstructingObjectParser, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = - new ConstructingObjectParser<>( - FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, - args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1])); - - static { - STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); - 
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); - STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); - } - - static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); - static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); - - static { - FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); - FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( - ConstructingObjectParser.constructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - FETCH_EXCEPTIONS_ENTRY_EXCEPTION); - } - - private final String leaderIndex; - - public String leaderIndex() { - return leaderIndex; - } - - private final int shardId; - - public int getShardId() { - return shardId; - } - - private final long leaderGlobalCheckpoint; - - public long leaderGlobalCheckpoint() { - return leaderGlobalCheckpoint; - } - - private final long leaderMaxSeqNo; - - public long leaderMaxSeqNo() { - return leaderMaxSeqNo; - } - - private final long followerGlobalCheckpoint; - - public long followerGlobalCheckpoint() { - return followerGlobalCheckpoint; - } - - private final long followerMaxSeqNo; - - public long followerMaxSeqNo() { - return followerMaxSeqNo; - } - - private final long lastRequestedSeqNo; - - public long lastRequestedSeqNo() { - return lastRequestedSeqNo; - } - - private final int numberOfConcurrentReads; - - public int numberOfConcurrentReads() { - return numberOfConcurrentReads; - } - - private final int numberOfConcurrentWrites; - - public int numberOfConcurrentWrites() { - return numberOfConcurrentWrites; - } - - private final int numberOfQueuedWrites; - - public int numberOfQueuedWrites() { - return numberOfQueuedWrites; - } - - private final long mappingVersion; - - public long mappingVersion() { - return mappingVersion; - } - - private final long totalFetchTimeMillis; - - public long totalFetchTimeMillis() { - return totalFetchTimeMillis; - } - - private final long numberOfSuccessfulFetches; - - public long numberOfSuccessfulFetches() { - return numberOfSuccessfulFetches; - } - - private final long numberOfFailedFetches; - - public long numberOfFailedFetches() { - return numberOfFailedFetches; - } - - private final long operationsReceived; - - public long operationsReceived() { - return operationsReceived; - } - - private final long totalTransferredBytes; - - public long totalTransferredBytes() { - return totalTransferredBytes; - } - - private final long totalIndexTimeMillis; - - public long totalIndexTimeMillis() { - return totalIndexTimeMillis; - } - - private final long numberOfSuccessfulBulkOperations; - - public long numberOfSuccessfulBulkOperations() { - return numberOfSuccessfulBulkOperations; - } - - private final long numberOfFailedBulkOperations; - - public long numberOfFailedBulkOperations() { - return numberOfFailedBulkOperations; - } - - private final long numberOfOperationsIndexed; - - public long numberOfOperationsIndexed() { - return numberOfOperationsIndexed; - } - - private final NavigableMap fetchExceptions; - - public NavigableMap fetchExceptions() { - return fetchExceptions; - } - - private final long 
timeSinceLastFetchMillis; - - public long timeSinceLastFetchMillis() { - return timeSinceLastFetchMillis; - } - - Status( - final String leaderIndex, - final int shardId, - final long leaderGlobalCheckpoint, - final long leaderMaxSeqNo, - final long followerGlobalCheckpoint, - final long followerMaxSeqNo, - final long lastRequestedSeqNo, - final int numberOfConcurrentReads, - final int numberOfConcurrentWrites, - final int numberOfQueuedWrites, - final long mappingVersion, - final long totalFetchTimeMillis, - final long numberOfSuccessfulFetches, - final long numberOfFailedFetches, - final long operationsReceived, - final long totalTransferredBytes, - final long totalIndexTimeMillis, - final long numberOfSuccessfulBulkOperations, - final long numberOfFailedBulkOperations, - final long numberOfOperationsIndexed, - final NavigableMap fetchExceptions, - final long timeSinceLastFetchMillis) { - this.leaderIndex = leaderIndex; - this.shardId = shardId; - this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; - this.leaderMaxSeqNo = leaderMaxSeqNo; - this.followerGlobalCheckpoint = followerGlobalCheckpoint; - this.followerMaxSeqNo = followerMaxSeqNo; - this.lastRequestedSeqNo = lastRequestedSeqNo; - this.numberOfConcurrentReads = numberOfConcurrentReads; - this.numberOfConcurrentWrites = numberOfConcurrentWrites; - this.numberOfQueuedWrites = numberOfQueuedWrites; - this.mappingVersion = mappingVersion; - this.totalFetchTimeMillis = totalFetchTimeMillis; - this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; - this.numberOfFailedFetches = numberOfFailedFetches; - this.operationsReceived = operationsReceived; - this.totalTransferredBytes = totalTransferredBytes; - this.totalIndexTimeMillis = totalIndexTimeMillis; - this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; - this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; - this.numberOfOperationsIndexed = numberOfOperationsIndexed; - this.fetchExceptions = Objects.requireNonNull(fetchExceptions); - this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; - } - - public Status(final StreamInput in) throws IOException { - this.leaderIndex = in.readString(); - this.shardId = in.readVInt(); - this.leaderGlobalCheckpoint = in.readZLong(); - this.leaderMaxSeqNo = in.readZLong(); - this.followerGlobalCheckpoint = in.readZLong(); - this.followerMaxSeqNo = in.readZLong(); - this.lastRequestedSeqNo = in.readZLong(); - this.numberOfConcurrentReads = in.readVInt(); - this.numberOfConcurrentWrites = in.readVInt(); - this.numberOfQueuedWrites = in.readVInt(); - this.mappingVersion = in.readVLong(); - this.totalFetchTimeMillis = in.readVLong(); - this.numberOfSuccessfulFetches = in.readVLong(); - this.numberOfFailedFetches = in.readVLong(); - this.operationsReceived = in.readVLong(); - this.totalTransferredBytes = in.readVLong(); - this.totalIndexTimeMillis = in.readVLong(); - this.numberOfSuccessfulBulkOperations = in.readVLong(); - this.numberOfFailedBulkOperations = in.readVLong(); - this.numberOfOperationsIndexed = in.readVLong(); - this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); - this.timeSinceLastFetchMillis = in.readZLong(); - } - - @Override - public String getWriteableName() { - return STATUS_PARSER_NAME; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - out.writeString(leaderIndex); - out.writeVInt(shardId); - out.writeZLong(leaderGlobalCheckpoint); - out.writeZLong(leaderMaxSeqNo); - 
out.writeZLong(followerGlobalCheckpoint); - out.writeZLong(followerMaxSeqNo); - out.writeZLong(lastRequestedSeqNo); - out.writeVInt(numberOfConcurrentReads); - out.writeVInt(numberOfConcurrentWrites); - out.writeVInt(numberOfQueuedWrites); - out.writeVLong(mappingVersion); - out.writeVLong(totalFetchTimeMillis); - out.writeVLong(numberOfSuccessfulFetches); - out.writeVLong(numberOfFailedFetches); - out.writeVLong(operationsReceived); - out.writeVLong(totalTransferredBytes); - out.writeVLong(totalIndexTimeMillis); - out.writeVLong(numberOfSuccessfulBulkOperations); - out.writeVLong(numberOfFailedBulkOperations); - out.writeVLong(numberOfOperationsIndexed); - out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); - out.writeZLong(timeSinceLastFetchMillis); - } - - @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { - builder.startObject(); - { - builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); - builder.field(SHARD_ID.getPreferredName(), shardId); - builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); - builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); - builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); - builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); - builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); - builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); - builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); - builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); - builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); - builder.humanReadableField( - TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), - "total_fetch_time", - new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); - builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); - builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); - builder.humanReadableField( - TOTAL_TRANSFERRED_BYTES.getPreferredName(), - "total_transferred", - new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); - builder.humanReadableField( - TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), - "total_index_time", - new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); - builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); - builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); - builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry entry : fetchExceptions.entrySet()) { - builder.startObject(); - { - builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); - builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), - 
"time_since_last_fetch", - new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); - } - builder.endObject(); - return builder; - } - - public static Status fromXContent(final XContentParser parser) { - return STATUS_PARSER.apply(parser, null); - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - final Status that = (Status) o; - return leaderIndex.equals(that.leaderIndex) && - shardId == that.shardId && - leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && - leaderMaxSeqNo == that.leaderMaxSeqNo && - followerGlobalCheckpoint == that.followerGlobalCheckpoint && - followerMaxSeqNo == that.followerMaxSeqNo && - lastRequestedSeqNo == that.lastRequestedSeqNo && - numberOfConcurrentReads == that.numberOfConcurrentReads && - numberOfConcurrentWrites == that.numberOfConcurrentWrites && - numberOfQueuedWrites == that.numberOfQueuedWrites && - mappingVersion == that.mappingVersion && - totalFetchTimeMillis == that.totalFetchTimeMillis && - numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && - numberOfFailedFetches == that.numberOfFailedFetches && - operationsReceived == that.operationsReceived && - totalTransferredBytes == that.totalTransferredBytes && - numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && - numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && - numberOfOperationsIndexed == that.numberOfOperationsIndexed && - /* - * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal - * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by - * keys. - */ - fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && - getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && - timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; - } - - @Override - public int hashCode() { - return Objects.hash( - leaderIndex, - shardId, - leaderGlobalCheckpoint, - leaderMaxSeqNo, - followerGlobalCheckpoint, - followerMaxSeqNo, - lastRequestedSeqNo, - numberOfConcurrentReads, - numberOfConcurrentWrites, - numberOfQueuedWrites, - mappingVersion, - totalFetchTimeMillis, - numberOfSuccessfulFetches, - numberOfFailedFetches, - operationsReceived, - totalTransferredBytes, - numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations, - numberOfOperationsIndexed, - /* - * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the - * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. 
- */ - fetchExceptions.keySet(), - getFetchExceptionMessages(this), - timeSinceLastFetchMillis); - } - - private static List getFetchExceptionMessages(final Status status) { - return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); - } - - public String toString() { - return Strings.toString(this); - } - - } - } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 82482792f39..62894b0ed99 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -48,14 +48,15 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout"); + public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + public static final ParseField RECORDED_HISTORY_UUID = new ParseField("recorded_history_uuid"); @SuppressWarnings("unchecked") private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), new ShardId((String) a[4], (String) a[5], (int) a[6]), (int) a[7], (int) a[8], (long) a[9], - (int) a[10], (int) a[11], (TimeValue) a[12], (TimeValue) a[13], (Map) a[14])); + (int) a[10], (int) a[11], (TimeValue) a[12], (TimeValue) a[13], (String) a[14], (Map) a[15])); static { PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_CLUSTER_ALIAS_FIELD); @@ -71,11 +72,12 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_WRITE_BATCHES); PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_SIZE); PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()), - RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), + MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.constructorArg(), (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + PARSER.declareString(ConstructingObjectParser.constructorArg(), RECORDED_HISTORY_UUID); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } @@ -87,13 +89,24 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { private final long maxBatchSizeInBytes; private final int maxConcurrentWriteBatches; private final int maxWriteBufferSize; - private final TimeValue retryTimeout; + private final TimeValue maxRetryDelay; private final TimeValue idleShardRetryDelay; + private final String 
recordedLeaderIndexHistoryUUID; private final Map headers; - ShardFollowTask(String leaderClusterAlias, ShardId followShardId, ShardId leaderShardId, int maxBatchOperationCount, - int maxConcurrentReadBatches, long maxBatchSizeInBytes, int maxConcurrentWriteBatches, - int maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay, Map headers) { + ShardFollowTask( + String leaderClusterAlias, + ShardId followShardId, + ShardId leaderShardId, + int maxBatchOperationCount, + int maxConcurrentReadBatches, + long maxBatchSizeInBytes, + int maxConcurrentWriteBatches, + int maxWriteBufferSize, + TimeValue maxRetryDelay, + TimeValue idleShardRetryDelay, + String recordedLeaderIndexHistoryUUID, + Map headers) { this.leaderClusterAlias = leaderClusterAlias; this.followShardId = followShardId; this.leaderShardId = leaderShardId; @@ -102,8 +115,9 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { this.maxBatchSizeInBytes = maxBatchSizeInBytes; this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; this.maxWriteBufferSize = maxWriteBufferSize; - this.retryTimeout = retryTimeout; + this.maxRetryDelay = maxRetryDelay; this.idleShardRetryDelay = idleShardRetryDelay; + this.recordedLeaderIndexHistoryUUID = recordedLeaderIndexHistoryUUID; this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } @@ -116,8 +130,9 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { this.maxBatchSizeInBytes = in.readVLong(); this.maxConcurrentWriteBatches = in.readVInt(); this.maxWriteBufferSize = in.readVInt(); - this.retryTimeout = in.readTimeValue(); + this.maxRetryDelay = in.readTimeValue(); this.idleShardRetryDelay = in.readTimeValue(); + this.recordedLeaderIndexHistoryUUID = in.readString(); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } @@ -153,8 +168,8 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { return maxBatchSizeInBytes; } - public TimeValue getRetryTimeout() { - return retryTimeout; + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; } public TimeValue getIdleShardRetryDelay() { @@ -165,6 +180,10 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { return followShardId.getIndex().getUUID() + "-" + followShardId.getId(); } + public String getRecordedLeaderIndexHistoryUUID() { + return recordedLeaderIndexHistoryUUID; + } + public Map getHeaders() { return headers; } @@ -184,8 +203,9 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { out.writeVLong(maxBatchSizeInBytes); out.writeVInt(maxConcurrentWriteBatches); out.writeVInt(maxWriteBufferSize); - out.writeTimeValue(retryTimeout); + out.writeTimeValue(maxRetryDelay); out.writeTimeValue(idleShardRetryDelay); + out.writeString(recordedLeaderIndexHistoryUUID); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -210,8 +230,9 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxBatchSizeInBytes); builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); 
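(The Status class above compares and hashes its map of fetch exceptions by key set plus exception message, because ElasticsearchException implements neither equals nor hashCode, relying on the map being ordered by key. A minimal self-contained sketch of that technique, substituting plain java.lang.Exception for ElasticsearchException; all names here are illustrative:

    import java.util.List;
    import java.util.NavigableMap;
    import java.util.Objects;
    import java.util.TreeMap;
    import java.util.stream.Collectors;

    // Sketch: compare two maps of exceptions by key set and message, since
    // Throwable subclasses generally define no equals/hashCode of their own.
    final class ExceptionMapComparison {

        private static List<String> messages(final NavigableMap<Long, Exception> m) {
            // Relies on the map being ordered by key, as the Status class does.
            return m.values().stream().map(Exception::getMessage).collect(Collectors.toList());
        }

        static boolean sameExceptions(final NavigableMap<Long, Exception> a, final NavigableMap<Long, Exception> b) {
            return a.keySet().equals(b.keySet()) && messages(a).equals(messages(b));
        }

        static int exceptionsHashCode(final NavigableMap<Long, Exception> m) {
            return Objects.hash(m.keySet(), messages(m));
        }

        public static void main(String[] args) {
            final NavigableMap<Long, Exception> x = new TreeMap<>();
            final NavigableMap<Long, Exception> y = new TreeMap<>();
            x.put(1L, new IllegalStateException("boom"));
            y.put(1L, new IllegalStateException("boom"));
            System.out.println(sameExceptions(x, y)); // true: same keys, same messages
        }
    }

End of aside.)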
builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(RECORDED_HISTORY_UUID.getPreferredName(), recordedLeaderIndexHistoryUUID); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); } @@ -229,15 +250,28 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { maxConcurrentWriteBatches == that.maxConcurrentWriteBatches && maxBatchSizeInBytes == that.maxBatchSizeInBytes && maxWriteBufferSize == that.maxWriteBufferSize && - Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(recordedLeaderIndexHistoryUUID, that.recordedLeaderIndexHistoryUUID) && Objects.equals(headers, that.headers); } @Override public int hashCode() { - return Objects.hash(leaderClusterAlias, followShardId, leaderShardId, maxBatchOperationCount, maxConcurrentReadBatches, - maxConcurrentWriteBatches, maxBatchSizeInBytes, maxWriteBufferSize, retryTimeout, idleShardRetryDelay, headers); + return Objects.hash( + leaderClusterAlias, + followShardId, + leaderShardId, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxConcurrentWriteBatches, + maxBatchSizeInBytes, + maxWriteBufferSize, + maxRetryDelay, + idleShardRetryDelay, + recordedLeaderIndexHistoryUUID, + headers + ); } public String toString() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 83e3e4806e1..7b63e73ee59 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -133,7 +133,8 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor handler, Consumer errorHandler) { - ShardChangesAction.Request request = new ShardChangesAction.Request(params.getLeaderShardId()); + ShardChangesAction.Request request = + new ShardChangesAction.Request(params.getLeaderShardId(), params.getRecordedLeaderIndexHistoryUUID()); request.setFromSeqNo(from); request.setMaxOperationCount(maxOperationCount); request.setMaxOperationSizeInBytes(params.getMaxBatchSizeInBytes()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java index 3b5d0ac53cf..394b42789d1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import java.io.IOException; import java.util.Arrays; @@ -33,8 +34,8 @@ import java.util.function.Consumer; public class TransportCcrStatsAction extends TransportTasksAction< ShardFollowNodeTask, - CcrStatsAction.TasksRequest, - CcrStatsAction.TasksResponse, CcrStatsAction.TaskResponse> { + CcrStatsAction.StatsRequest, + CcrStatsAction.StatsResponses, CcrStatsAction.StatsResponse> { private final IndexNameExpressionResolver resolver; 
private final CcrLicenseChecker ccrLicenseChecker; @@ -53,8 +54,8 @@ public class TransportCcrStatsAction extends TransportTasksAction< clusterService, transportService, actionFilters, - CcrStatsAction.TasksRequest::new, - CcrStatsAction.TasksResponse::new, + CcrStatsAction.StatsRequest::new, + CcrStatsAction.StatsResponses::new, Ccr.CCR_THREAD_POOL_NAME); this.resolver = Objects.requireNonNull(resolver); this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); @@ -63,8 +64,8 @@ @Override protected void doExecute( final Task task, - final CcrStatsAction.TasksRequest request, - final ActionListener listener) { + final CcrStatsAction.StatsRequest request, + final ActionListener listener) { if (ccrLicenseChecker.isCcrAllowed() == false) { listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; @@ -73,21 +74,21 @@ } @Override - protected CcrStatsAction.TasksResponse newResponse( - final CcrStatsAction.TasksRequest request, - final List taskResponses, + protected CcrStatsAction.StatsResponses newResponse( + final CcrStatsAction.StatsRequest request, + final List statsResponses, final List taskOperationFailures, final List failedNodeExceptions) { - return new CcrStatsAction.TasksResponse(taskOperationFailures, failedNodeExceptions, taskResponses); + return new CcrStatsAction.StatsResponses(taskOperationFailures, failedNodeExceptions, statsResponses); } @Override - protected CcrStatsAction.TaskResponse readTaskResponse(final StreamInput in) throws IOException { - return new CcrStatsAction.TaskResponse(in); + protected CcrStatsAction.StatsResponse readTaskResponse(final StreamInput in) throws IOException { + return new CcrStatsAction.StatsResponse(in); } @Override - protected void processTasks(final CcrStatsAction.TasksRequest request, final Consumer operation) { + protected void processTasks(final CcrStatsAction.StatsRequest request, final Consumer operation) { final ClusterState state = clusterService.state(); final Set concreteIndices = new HashSet<>(Arrays.asList(resolver.concreteIndexNames(state, request))); for (final Task task : taskManager.getTasks().values()) { @@ -102,10 +103,10 @@ @Override protected void taskOperation( - final CcrStatsAction.TasksRequest request, + final CcrStatsAction.StatsRequest request, final ShardFollowNodeTask task, - final ActionListener listener) { - listener.onResponse(new CcrStatsAction.TaskResponse(task.getFollowShardId(), task.getStatus())); + final ActionListener listener) { + listener.onResponse(new CcrStatsAction.StatsResponse(task.getStatus())); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java new file mode 100644 index 00000000000..c6d1a7c36c5 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.ActiveShardsObserver; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Consumer; + +public final class TransportCreateAndFollowIndexAction + extends TransportMasterNodeAction { + + private final Client client; + private final AllocationService allocationService; + private final RemoteClusterService remoteClusterService; + private final ActiveShardsObserver activeShardsObserver; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportCreateAndFollowIndexAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ClusterService clusterService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Client client, + final AllocationService allocationService, + final CcrLicenseChecker ccrLicenseChecker) { + super( + settings, + CreateAndFollowIndexAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver, + CreateAndFollowIndexAction.Request::new); + this.client = client; + this.allocationService = allocationService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected CreateAndFollowIndexAction.Response newResponse() { + return new CreateAndFollowIndexAction.Response(); + } + + @Override + protected void masterOperation( + 
final CreateAndFollowIndexAction.Request request, + final ClusterState state, + final ActionListener listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + createFollowerIndexAndFollowLocalIndex(request, state, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void createFollowerIndexAndFollowLocalIndex( + final CreateAndFollowIndexAction.Request request, + final ClusterState state, + final ActionListener listener) { + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final String leaderIndex = request.getFollowRequest().getLeaderIndex(); + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(leaderIndex); + Consumer handler = historyUUIDs -> { + createFollowerIndex(leaderIndexMetadata, historyUUIDs, request, listener); + }; + ccrLicenseChecker.fetchLeaderHistoryUUIDs(client, leaderIndexMetadata, listener::onFailure, handler); + } + + private void createFollowerIndexAndFollowRemoteIndex( + final CreateAndFollowIndexAction.Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + (historyUUID, leaderIndexMetaData) -> createFollowerIndex(leaderIndexMetaData, historyUUID, request, listener)); + } + + private void createFollowerIndex( + final IndexMetaData leaderIndexMetaData, + final String[] historyUUIDs, + final CreateAndFollowIndexAction.Request request, + final ActionListener listener) { + if (leaderIndexMetaData == null) { + listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + + "] does not exist")); + return; + } + + ActionListener handler = ActionListener.wrap( + result -> { + if (result) { + initiateFollowing(request, listener); + } else { + listener.onResponse(new CreateAndFollowIndexAction.Response(true, false, false)); + } + }, + listener::onFailure); + // Can't use create index api here, because then index templates can alter the mappings / settings. + // And index templates could introduce settings / mappings that are incompatible with the leader index. 
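(The comment above explains why the follower index is created through a direct cluster-state update rather than the create-index API. The code that follows submits a task that computes a new cluster state from the current one on the master. A toy analogue of that submitStateUpdateTask pattern, with a single-threaded "master" applying currentState -> newState functions; every name here is invented for illustration, none of it is the Elasticsearch API:

    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Toy analogue: all mutations are expressed as functions of the current
    // state and applied on one thread, so no task sees a half-updated state.
    final class ToyClusterService {

        interface StateUpdateTask {
            Map<String, String> execute(Map<String, String> currentState) throws Exception;
        }

        private final ExecutorService master = Executors.newSingleThreadExecutor();
        private volatile Map<String, String> state = new TreeMap<>();

        void submitStateUpdateTask(String source, StateUpdateTask task) {
            master.execute(() -> {
                try {
                    // Work on a copy; publish the new state only on success.
                    state = task.execute(new TreeMap<>(state));
                } catch (Exception e) {
                    System.err.println("[" + source + "] failed: " + e);
                }
            });
        }

        public static void main(String[] args) throws Exception {
            ToyClusterService service = new ToyClusterService();
            service.submitStateUpdateTask("follow_index_action", current -> {
                if (current.containsKey("follower")) {
                    // Mirrors the ResourceAlreadyExistsException check above.
                    throw new IllegalStateException("index already exists");
                }
                current.put("follower", "created");
                return current;
            });
            service.master.shutdown();
            service.master.awaitTermination(5, TimeUnit.SECONDS);
            System.out.println(service.state); // {follower=created}
        }
    }

End of aside.)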
+ clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { + + @Override + protected Boolean newResponse(final boolean acknowledged) { + return acknowledged; + } + + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + String followIndex = request.getFollowRequest().getFollowerIndex(); + IndexMetaData currentIndex = currentState.metaData().index(followIndex); + if (currentIndex != null) { + throw new ResourceAlreadyExistsException(currentIndex.getIndex()); + } + + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); + + // Adding the leader index uuid for each shard as custom metadata: + Map metadata = new HashMap<>(); + metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", historyUUIDs)); + imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, metadata); + + // Copy all settings, but overwrite a few settings. + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(leaderIndexMetaData.getSettings()); + // Overwriting UUID here, because otherwise we can't follow indices in the same cluster + settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + imdBuilder.settings(settingsBuilder); + + // Copy mappings from leader IMD to follow IMD + for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { + imdBuilder.putMapping(cursor.value); + } + imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); + IndexMetaData followIMD = imdBuilder.build(); + mdBuilder.put(followIMD, false); + + ClusterState.Builder builder = ClusterState.builder(currentState); + builder.metaData(mdBuilder.build()); + ClusterState updatedState = builder.build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); + updatedState = allocationService.reroute( + ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), + "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); + + logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", + followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); + + return updatedState; + } + }); + } + + private void initiateFollowing( + final CreateAndFollowIndexAction.Request request, + final ActionListener listener) { + activeShardsObserver.waitForActiveShards(new String[]{request.getFollowRequest().getFollowerIndex()}, + ActiveShardCount.DEFAULT, request.timeout(), result -> { + if (result) { + client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( + r -> listener.onResponse(new CreateAndFollowIndexAction.Response(true, true, r.isAcknowledged())), + listener::onFailure + )); + } else { + listener.onResponse(new CreateAndFollowIndexAction.Response(true, false, false)); + } + }, listener::onFailure); + } + + @Override + protected ClusterBlockException checkBlock(final CreateAndFollowIndexAction.Request request, final ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); + } + +} diff 
--git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index 6c1ca81e7c4..8d2e59defd8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; import java.util.HashMap; import java.util.List; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java new file mode 100644 index 00000000000..fff3f1618aa --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java @@ -0,0 +1,369 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexingSlowLog; +import org.elasticsearch.index.SearchSlowLog; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; + +import java.io.IOException; +import java.util.Collections; +import 
java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; + +public class TransportFollowIndexAction extends HandledTransportAction { + + private final Client client; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final RemoteClusterService remoteClusterService; + private final PersistentTasksService persistentTasksService; + private final IndicesService indicesService; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportFollowIndexAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final ClusterService clusterService, + final PersistentTasksService persistentTasksService, + final IndicesService indicesService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, FollowIndexAction.NAME, transportService, actionFilters, FollowIndexAction.Request::new); + this.client = client; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.persistentTasksService = persistentTasksService; + this.indicesService = indicesService; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute(final Task task, + final FollowIndexAction.Request request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + followLocalIndex(request, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + followRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void followLocalIndex(final FollowIndexAction.Request request, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); + if (leaderIndexMetadata == null) { + throw new IndexNotFoundException(request.getFollowerIndex()); + } + ccrLicenseChecker.fetchLeaderHistoryUUIDs(client, leaderIndexMetadata, listener::onFailure, historyUUIDs -> { + try { + start(request, null, leaderIndexMetadata, followerIndexMetadata, historyUUIDs, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + + private void followRemoteIndex( + final FollowIndexAction.Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData 
followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadataAndHistoryUUIDs( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + (leaderHistoryUUID, leaderIndexMetadata) -> { + try { + start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, leaderHistoryUUID, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + + /** + * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then + * creates a persistent task for each leader primary shard. These persistent tasks track changes in the leader + * shard and replicate these changes to a follower shard. + * + * Currently the following validation is performed: + * <ul>
+ * <li>The leader index and follow index need to have the same number of primary shards</li>
+ * </ul>
+ */ + void start( + FollowIndexAction.Request request, + String clusterNameAlias, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + String[] leaderIndexHistoryUUIDs, + ActionListener handler) throws IOException { + + MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; + validate(request, leaderIndexMetadata, followIndexMetadata, leaderIndexHistoryUUIDs, mapperService); + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + String[] recordedLeaderShardHistoryUUIDs = extractIndexShardHistoryUUIDs(followIndexMetadata); + String recordedLeaderShardHistoryUUID = recordedLeaderShardHistoryUUIDs[shardId]; + + ShardFollowTask shardFollowTask = new ShardFollowTask( + clusterNameAlias, + new ShardId(followIndexMetadata.getIndex(), shardId), + new ShardId(leaderIndexMetadata.getIndex(), shardId), + request.getMaxBatchOperationCount(), + request.getMaxConcurrentReadBatches(), + request.getMaxOperationSizeInBytes(), + request.getMaxConcurrentWriteBatches(), + request.getMaxWriteBufferSize(), + request.getMaxRetryDelay(), + request.getIdleShardRetryDelay(), + recordedLeaderShardHistoryUUID, + filteredHeaders); + persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
+ handler.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + handler.onFailure(error); + } + } + } + } + ); + } + } + + static void validate( + final FollowIndexAction.Request request, + final IndexMetaData leaderIndex, + final IndexMetaData followIndex, + final String[] leaderIndexHistoryUUID, + final MapperService followerMapperService) { + if (leaderIndex == null) { + throw new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not exist"); + } + if (followIndex == null) { + throw new IllegalArgumentException("follow index [" + request.getFollowerIndex() + "] does not exist"); + } + + String[] recordedHistoryUUIDs = extractIndexShardHistoryUUIDs(followIndex); + assert recordedHistoryUUIDs.length == leaderIndexHistoryUUID.length; + for (int i = 0; i < leaderIndexHistoryUUID.length; i++) { + String recordedLeaderIndexHistoryUUID = recordedHistoryUUIDs[i]; + String actualLeaderIndexHistoryUUID = leaderIndexHistoryUUID[i]; + if (recordedLeaderIndexHistoryUUID.equals(actualLeaderIndexHistoryUUID) == false) { + throw new IllegalArgumentException("leader shard [" + request.getFollowerIndex() + "][" + i + "] should reference [" + + recordedLeaderIndexHistoryUUID + "] as history uuid but instead reference [" + actualLeaderIndexHistoryUUID + + "] as history uuid"); + } + } + + if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { + throw new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not have soft deletes enabled"); + } + if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { + throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + + "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]"); + } + if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { + throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + + "] does not match with the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); + } + if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { + throw new IllegalArgumentException("leader and follow index must be open"); + } + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { + throw new IllegalArgumentException("the following index [" + request.getFollowerIndex() + "] is not ready " + + "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); + } + // Make a copy, remove settings that are allowed to be different and then compare if the settings are equal. + Settings leaderSettings = filter(leaderIndex.getSettings()); + Settings followerSettings = filter(followIndex.getSettings()); + if (leaderSettings.equals(followerSettings) == false) { + throw new IllegalArgumentException("the leader and follower index settings must be identical"); + } + + // Validates if the current follower mapping is mergable with the leader mapping. 
+ // This also validates for example whether specific mapper plugins have been installed + followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); + } + + private static String[] extractIndexShardHistoryUUIDs(IndexMetaData followIndexMetadata) { + String historyUUIDs = followIndexMetadata.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY) + .get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS); + return historyUUIDs.split(","); + } + + private static final Set> WHITE_LISTED_SETTINGS; + + static { + final Set> whiteListedSettings = new HashSet<>(); + whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); + + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); + whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); + + whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); + whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); + whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); + whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); 
+ whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); + + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); + + WHITE_LISTED_SETTINGS = Collections.unmodifiableSet(whiteListedSettings); + } + + private static Settings filter(Settings originalSettings) { + Settings.Builder settings = Settings.builder().put(originalSettings); + // Remove settings that are always going to be different between leader and follow index: + settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); + settings.remove(IndexMetaData.SETTING_INDEX_UUID); + settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); + settings.remove(IndexMetaData.SETTING_CREATION_DATE); + + Iterator iterator = settings.keys().iterator(); + while (iterator.hasNext()) { + String key = iterator.next(); + for (Setting whitelistedSetting : WHITE_LISTED_SETTINGS) { + if (whitelistedSetting.match(key)) { + iterator.remove(); + break; + } + } + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index a4ff9511cfb..4afd51f56e6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.util.ArrayList; import java.util.HashMap; @@ -149,7 +150,7 @@ public class TransportPutAutoFollowPatternAction extends request.getMaxOperationSizeInBytes(), request.getMaxConcurrentWriteBatches(), request.getMaxWriteBufferSize(), - request.getRetryTimeout(), + request.getMaxRetryDelay(), request.getIdleShardRetryDelay() ); patterns.put(request.getLeaderClusterAlias(), autoFollowPattern); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java new file mode 100644 index 00000000000..05cde0eab85 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class TransportUnfollowIndexAction extends HandledTransportAction { + + private final Client client; + private final PersistentTasksService persistentTasksService; + + @Inject + public TransportUnfollowIndexAction( + final Settings settings, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final PersistentTasksService persistentTasksService) { + super(settings, UnfollowIndexAction.NAME, transportService, actionFilters, UnfollowIndexAction.Request::new); + this.client = client; + this.persistentTasksService = persistentTasksService; + } + + @Override + protected void doExecute( + final Task task, + final UnfollowIndexAction.Request request, + final ActionListener listener) { + + client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { + IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.getFollowIndex()); + if (followIndexMetadata == null) { + listener.onFailure(new IllegalArgumentException("follow index [" + request.getFollowIndex() + "] does not exist")); + return; + } + + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + persistentTasksService.sendRemoveRequest(taskId, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
+ listener.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + listener.onFailure(error); + } + } + } + }); + } + }, listener::onFailure)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java deleted file mode 100644 index 93b2bcc3e40..00000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ccr.action; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -public class UnfollowIndexAction extends Action { - - public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); - public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; - - private UnfollowIndexAction() { - super(NAME); - } - - @Override - public AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - public static class Request extends ActionRequest { - - private String followIndex; - - public String getFollowIndex() { - return followIndex; - } - - public void setFollowIndex(String followIndex) { - this.followIndex = followIndex; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - followIndex = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(followIndex); - } - } - - public static class TransportAction extends HandledTransportAction { - - private final Client client; - private final PersistentTasksService persistentTasksService; - - @Inject - public TransportAction(Settings settings, - TransportService transportService, - ActionFilters actionFilters, - Client client, - PersistentTasksService persistentTasksService) { - super(settings, NAME, transportService, actionFilters, Request::new); - this.client = client; - this.persistentTasksService = persistentTasksService; - } - - @Override - protected 
void doExecute(Task task, - Request request, - ActionListener<AcknowledgedResponse> listener) { - - client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { - IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.followIndex); - if (followIndexMetadata == null) { - listener.onFailure(new IllegalArgumentException("follow index [" + request.followIndex + "] does not exist")); - return; - } - - final int numShards = followIndexMetadata.getNumberOfShards(); - final AtomicInteger counter = new AtomicInteger(numShards); - final AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); - for (int i = 0; i < numShards; i++) { - final int shardId = i; - String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; - persistentTasksService.sendRemoveRequest(taskId, - new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) { - responses.set(shardId, task); - finalizeResponse(); - } - - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } - - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } - - if (error == null) { - // include task ids? - listener.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - listener.onFailure(error); - } - } - } - }); - } - }, listener::onFailure)); - } - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index df34fd6cd45..de285dba19e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import java.io.IOException; @@ -33,7 +33,7 @@ public class RestCcrStatsAction extends BaseRestHandler { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - final CcrStatsAction.TasksRequest request = new CcrStatsAction.TasksRequest(); + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); request.setIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); request.setIndicesOptions(IndicesOptions.fromRequest(restRequest, request.indicesOptions())); return channel -> client.execute(CcrStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java index 4d9079b36c9..8816760f526 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java @@ -14,8 +14,8 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction.Request; public class RestCreateAndFollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java index d25e9bf65fd..91a607de27b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java @@ -11,11 +11,11 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.Request; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction.Request; import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction.INSTANCE; public class RestDeleteAutoFollowPatternAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java index 88f5b74f4b1..8a1d7d778bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java @@ -15,8 +15,8 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.FollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.FollowIndexAction.Request; public class RestFollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java index 9b3aac3bbb5..6b9a4aeff20 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java @@ -12,11 +12,11 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.Request; +import 
org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction.Request; import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction.INSTANCE; public class RestPutAutoFollowPatternAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java index 2df6c77379b..9a82717b621 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java @@ -14,8 +14,8 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction.Request; public class RestUnfollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index 2d58358d11f..d8bf2872547 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -22,13 +22,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; -import org.elasticsearch.xpack.ccr.action.FollowIndexAction; -import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.util.Collection; import java.util.Collections; @@ -91,9 +90,9 @@ public class CcrLicenseIT extends ESSingleNodeTestCase { public void testThatCcrStatsAreUnavailableWithNonCompliantLicense() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); - client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.TasksRequest(), new ActionListener<CcrStatsAction.TasksResponse>() { + client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.StatsRequest(), new ActionListener<CcrStatsAction.StatsResponses>() { @Override - public void onResponse(final CcrStatsAction.TasksResponse tasksResponse) { + public void onResponse(final CcrStatsAction.StatsResponses statsResponses) { latch.countDown(); fail(); } @@ -196,11 +195,11 @@ public class CcrLicenseIT extends ESSingleNodeTestCase { return new FollowIndexAction.Request( "leader", "follower", - ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, - 
ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, - ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, - ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, + FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, + FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java index 7980e128140..f4291ddc8dd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -27,7 +27,9 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -38,13 +40,13 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; -import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; -import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.io.IOException; import java.util.Arrays; @@ -116,7 +118,8 @@ public class ShardChangesIT extends ESIntegTestCase { long globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); assertThat(globalCheckPoint, equalTo(2L)); - ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId()); + String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId(), historyUUID); request.setFromSeqNo(0L); request.setMaxOperationCount(3); ShardChangesAction.Response response = client().execute(ShardChangesAction.INSTANCE, request).get(); @@ -141,7 +144,7 @@ public class ShardChangesIT extends ESIntegTestCase { globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); assertThat(globalCheckPoint, equalTo(5L)); - request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId()); + request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId(), 
historyUUID); request.setFromSeqNo(3L); request.setMaxOperationCount(3); response = client().execute(ShardChangesAction.INSTANCE, request).get(); @@ -335,7 +338,7 @@ public class ShardChangesIT extends ESIntegTestCase { final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", randomIntBetween(32, 2048), randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), - ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); client().execute(FollowIndexAction.INSTANCE, followRequest).get(); long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), @@ -357,16 +360,11 @@ public class ShardChangesIT extends ESIntegTestCase { final String leaderIndexSettings = getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); - - final String followerIndexSettings = - getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), "true")); - assertAcked(client().admin().indices().prepareCreate("index2").setSource(followerIndexSettings, XContentType.JSON)); - internalCluster().ensureAtLeastNumDataNodes(2); - ensureGreen("index1", "index2"); + ensureGreen("index1"); final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); - client().execute(FollowIndexAction.INSTANCE, followRequest).get(); + client().execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest)).get(); final int numDocs = randomIntBetween(2, 64); for (int i = 0; i < numDocs; i++) { @@ -409,13 +407,13 @@ public class ShardChangesIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareCreate("test-follower").get()); // Leader index does not exist. FollowIndexAction.Request followRequest1 = createFollowRequest("non-existent-leader", "test-follower"); - expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest1).actionGet()); + expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest1).actionGet()); // Follower index does not exist. FollowIndexAction.Request followRequest2 = createFollowRequest("non-test-leader", "non-existent-follower"); - expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest2).actionGet()); + expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest2).actionGet()); // Both indices do not exist. 
FollowIndexAction.Request followRequest3 = createFollowRequest("non-existent-leader", "non-existent-follower"); - expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest3).actionGet()); + expectThrows(IndexNotFoundException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest3).actionGet()); } @TestLogging("_root:DEBUG") @@ -507,7 +505,7 @@ public class ShardChangesIT extends ESIntegTestCase { } } assertThat(taskInfo, notNullValue()); - ShardFollowNodeTask.Status status = (ShardFollowNodeTask.Status) taskInfo.getStatus(); + ShardFollowNodeTaskStatus status = (ShardFollowNodeTaskStatus) taskInfo.getStatus(); assertThat(status, notNullValue()); assertThat("incorrect global checkpoint " + shardFollowTaskParams, status.followerGlobalCheckpoint(), @@ -665,9 +663,9 @@ public class ShardChangesIT extends ESIntegTestCase { } public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { - return new FollowIndexAction.Request(leaderIndex, followIndex, ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + return new FollowIndexAction.Request(leaderIndex, followIndex, FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 2ef84129232..5ab11cf5b0c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; import java.util.Collections; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java index a4808e428fe..514c233188a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java @@ -15,6 +15,8 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.ccr.LocalStateCcr; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.util.Arrays; import java.util.Collection; @@ -131,7 +133,7 @@ public class AutoFollowTests extends ESSingleNodeTestCase { 
request.setMaxOperationSizeInBytes(randomNonNegativeLong()); } if (randomBoolean()) { - request.setRetryTimeout(TimeValue.timeValueMillis(500)); + request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); @@ -162,8 +164,8 @@ public class AutoFollowTests extends ESSingleNodeTestCase { if (request.getMaxOperationSizeInBytes() != null) { assertThat(shardFollowTask.getMaxBatchSizeInBytes(), equalTo(request.getMaxOperationSizeInBytes())); } - if (request.getRetryTimeout() != null) { - assertThat(shardFollowTask.getRetryTimeout(), equalTo(request.getRetryTimeout())); + if (request.getMaxRetryDelay() != null) { + assertThat(shardFollowTask.getMaxRetryDelay(), equalTo(request.getMaxRetryDelay())); } if (request.getIdleShardRetryDelay() != null) { assertThat(shardFollowTask.getIdleShardRetryDelay(), equalTo(request.getIdleShardRetryDelay())); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java index c68d1849965..c751ca5f000 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; public class CreateAndFollowIndexRequestTests extends AbstractStreamableTestCase<CreateAndFollowIndexAction.Request> { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java index 11a518ef067..44ac21055a7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; public class CreateAndFollowIndexResponseTests extends AbstractStreamableTestCase<CreateAndFollowIndexAction.Response> { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java index 7202f7202c6..2017fa2fdb9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index 430e9cb48b1..b973fbac3ce 100644 --- 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -59,22 +59,27 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { int max = randomIntBetween(min, numWrites - 1); int size = max - min + 1; final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), min, size, Long.MAX_VALUE); + indexShard.getGlobalCheckpoint(), min, size, indexShard.getHistoryUUID(), Long.MAX_VALUE); final List<Long> seenSeqNos = Arrays.stream(operations).map(Translog.Operation::seqNo).collect(Collectors.toList()); final List<Long> expectedSeqNos = LongStream.rangeClosed(min, max).boxed().collect(Collectors.toList()); assertThat(seenSeqNos, equalTo(expectedSeqNos)); } - // get operations for a range no operations exists: Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), - numWrites, numWrites + 1, Long.MAX_VALUE); + numWrites, numWrites + 1, indexShard.getHistoryUUID(), Long.MAX_VALUE); assertThat(operations.length, equalTo(0)); // get operations for a range some operations do not exist: operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), - numWrites - 10, numWrites + 10, Long.MAX_VALUE); + numWrites - 10, numWrites + 10, indexShard.getHistoryUUID(), Long.MAX_VALUE); assertThat(operations.length, equalTo(10)); + + // Unexpected history UUID: + Exception e = expectThrows(IllegalStateException.class, () -> ShardChangesAction.getOperations(indexShard, + indexShard.getGlobalCheckpoint(), 0, 10, "different-history-uuid", Long.MAX_VALUE)); + assertThat(e.getMessage(), equalTo("unexpected history uuid, expected [different-history-uuid], actual [" + + indexShard.getHistoryUUID() + "]")); } public void testGetOperationsWhenShardNotStarted() throws Exception { @@ -83,7 +88,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { ShardRouting shardRouting = TestShardRouting.newShardRouting("index", 0, "_node_id", true, ShardRoutingState.INITIALIZING); Mockito.when(indexShard.routingEntry()).thenReturn(shardRouting); expectThrows(IndexShardNotStartedException.class, () -> ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), 0, 1, Long.MAX_VALUE)); + indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), Long.MAX_VALUE)); } public void testGetOperationsExceedByteLimit() throws Exception { @@ -100,7 +105,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final IndexShard indexShard = indexService.getShard(0); final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), - 0, 12, 256); + 0, 12, indexShard.getHistoryUUID(), 256); assertThat(operations.length, equalTo(12)); assertThat(operations[0].seqNo(), equalTo(0L)); assertThat(operations[1].seqNo(), equalTo(1L)); @@ -127,7 +132,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final IndexShard indexShard = indexService.getShard(0); final Translog.Operation[] operations = - ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), 0, 1, 0); + ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), 0); assertThat(operations.length, equalTo(1)); assertThat(operations[0].seqNo(), equalTo(0L)); } @@ -137,7 +142,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final AtomicReference<Exception> reference = new AtomicReference<>(); final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); transportAction.execute( - new ShardChangesAction.Request(new ShardId(new Index("non-existent", "uuid"), 0)), + new ShardChangesAction.Request(new ShardId(new Index("non-existent", "uuid"), 0), "uuid"), new ActionListener<ShardChangesAction.Response>() { @Override public void onResponse(final ShardChangesAction.Response response) { @@ -162,7 +167,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { final AtomicReference<Exception> reference = new AtomicReference<>(); final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); transportAction.execute( - new ShardChangesAction.Request(new ShardId(indexService.getMetaData().getIndex(), numberOfShards)), + new ShardChangesAction.Request(new ShardId(indexService.getMetaData().getIndex(), numberOfShards), "uuid"), new ActionListener<ShardChangesAction.Response>() { @Override public void onResponse(final ShardChangesAction.Response response) {
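Note: the tests above pin down the new history-UUID guard threaded through ShardChangesAction.getOperations: the follower passes the leader history UUID it recorded when it started following, and a read fails fast with an IllegalStateException when the leader shard's actual history UUID no longer matches (for example after the leader index was restored or its history re-bootstrapped). A compact sketch of the comparison, with the message format taken directly from the assertions above (the helper name is hypothetical):

    // Fail a read when the leader shard's history has changed since following began.
    static void validateHistoryUUID(String expectedHistoryUUID, String actualHistoryUUID) {
        if (actualHistoryUUID.equals(expectedHistoryUUID) == false) {
            throw new IllegalStateException("unexpected history uuid, expected [" + expectedHistoryUUID
                + "], actual [" + actualHistoryUUID + "]");
        }
    }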
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java index 19585da8851..2ea2086990b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java @@ -15,7 +15,8 @@ public class ShardChangesRequestTests extends AbstractStreamableTestCase { - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.leaderGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); assertThat(status.followerGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); final long numberOfFailedFetches = @@ -65,7 +67,7 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { task.markAsCompleted(); assertBusy(() -> { - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); }); @@ -73,10 +75,20 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testRun) { AtomicBoolean stopped = new AtomicBoolean(false); - ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), - new ShardId("leader_index", "", 0), testRun.maxOperationCount, concurrency, - ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240, - TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap()); + ShardFollowTask params = new ShardFollowTask( + null, + new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), + testRun.maxOperationCount, + concurrency, + FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + concurrency, + 10240, + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10), + "uuid", + Collections.emptyMap() + ); ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> { @@ -213,8 +225,16 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { byte[] source = 
"{}".getBytes(StandardCharsets.UTF_8); ops.add(new Translog.Index("doc", id, seqNo, 0, source)); } - item.add(new TestResponse(null, mappingVersion, - new ShardChangesAction.Response(mappingVersion, nextGlobalCheckPoint, nextGlobalCheckPoint, ops.toArray(EMPTY)))); + item.add(new TestResponse( + null, + mappingVersion, + new ShardChangesAction.Response( + mappingVersion, + nextGlobalCheckPoint, + nextGlobalCheckPoint, + ops.toArray(EMPTY)) + ) + ); responses.put(prevGlobalCheckpoint, item); } else { // Simulates a leader shard copy not having all the operations the shard follow task thinks it has by @@ -230,8 +250,12 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { } // Sometimes add an empty shard changes response to also simulate a leader shard lagging behind if (sometimes()) { - ShardChangesAction.Response response = - new ShardChangesAction.Response(mappingVersion, prevGlobalCheckpoint, prevGlobalCheckpoint, EMPTY); + ShardChangesAction.Response response = new ShardChangesAction.Response( + mappingVersion, + prevGlobalCheckpoint, + prevGlobalCheckpoint, + EMPTY + ); item.add(new TestResponse(null, mappingVersion, response)); } List ops = new ArrayList<>(); @@ -242,8 +266,12 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { } // Report toSeqNo to simulate maxBatchSizeInBytes limit being met or last op to simulate a shard lagging behind: long localLeaderGCP = randomBoolean() ? ops.get(ops.size() - 1).seqNo() : toSeqNo; - ShardChangesAction.Response response = - new ShardChangesAction.Response(mappingVersion, localLeaderGCP, localLeaderGCP, ops.toArray(EMPTY)); + ShardChangesAction.Response response = new ShardChangesAction.Response( + mappingVersion, + localLeaderGCP, + localLeaderGCP, + ops.toArray(EMPTY) + ); item.add(new TestResponse(null, mappingVersion, response)); responses.put(fromSeqNo, Collections.unmodifiableList(item)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java index 8368a818e00..d5f2ab7ea08 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.io.IOException; import java.util.Map; @@ -21,17 +22,18 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase { +public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase { @Override - protected ShardFollowNodeTask.Status doParseInstance(XContentParser parser) throws IOException { - return ShardFollowNodeTask.Status.fromXContent(parser); + protected ShardFollowNodeTaskStatus doParseInstance(XContentParser parser) throws IOException { + return ShardFollowNodeTaskStatus.fromXContent(parser); } @Override - protected ShardFollowNodeTask.Status createTestInstance() { + protected ShardFollowNodeTaskStatus createTestInstance() { // if you change this 
constructor, reflect the changes in the hand-written assertions below - return new ShardFollowNodeTask.Status( + return new ShardFollowNodeTaskStatus( + randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(), randomNonNegativeLong(), @@ -57,9 +59,10 @@ public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase< } @Override - protected void assertEqualInstances(final ShardFollowNodeTask.Status expectedInstance, final ShardFollowNodeTask.Status newInstance) { + protected void assertEqualInstances(final ShardFollowNodeTaskStatus expectedInstance, final ShardFollowNodeTaskStatus newInstance) { assertNotSame(expectedInstance, newInstance); assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex())); + assertThat(newInstance.followerIndex(), equalTo(expectedInstance.followerIndex())); assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId())); assertThat(newInstance.leaderGlobalCheckpoint(), equalTo(expectedInstance.leaderGlobalCheckpoint())); assertThat(newInstance.leaderMaxSeqNo(), equalTo(expectedInstance.leaderMaxSeqNo())); @@ -108,8 +111,8 @@ public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase< } @Override - protected Writeable.Reader<ShardFollowNodeTask.Status> instanceReader() { - return ShardFollowNodeTask.Status::new; + protected Writeable.Reader<ShardFollowNodeTaskStatus> instanceReader() { + return ShardFollowNodeTaskStatus::new; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 4f7c0bf1664..101b2580759 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.net.ConnectException; import java.nio.charset.StandardCharsets; @@ -29,12 +30,13 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.sameInstance; public class ShardFollowNodeTaskTests extends ESTestCase { @@ -44,7 +46,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private List<List<Translog.Operation>> bulkShardOperationRequests; private BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> task.run(); - private Consumer<ShardFollowNodeTask.Status> beforeSendShardChangesRequest = status -> {}; + private Consumer<ShardFollowNodeTaskStatus> beforeSendShardChangesRequest = status -> {}; private AtomicBoolean simulateResponse = new AtomicBoolean(); @@ -66,7 +68,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests, contains(new long[][]{ {6L, 8L}, {14L, 8L}, {22L, 8L}, {30L, 8L}, {38L, 8L}, {46L, 8L}, {54L, 7L}} )); - ShardFollowNodeTask.Status status = 
task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(7)); assertThat(status.lastRequestedSeqNo(), equalTo(60L)); } @@ -86,7 +88,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer is full - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -102,7 +104,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(8L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(7L)); } @@ -140,7 +142,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(15L)); @@ -164,7 +166,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -176,7 +178,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } @@ -211,7 +213,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { } assertFalse("task is not stopped", task.isStopped()); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.numberOfFailedFetches(), equalTo((long)max)); @@ -222,59 +224,6 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } - public void testReceiveRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - int max = randomIntBetween(11, 32); - for (int i = 0; i < max; i++) { - readFailures.add(new 
ShardNotFoundException(new ShardId("leader_index", "", 0))); - } - final AtomicLong retryCounter = new AtomicLong(); - // before each retry, we assert the fetch failures; after the last retry, the fetch failure should persist - beforeSendShardChangesRequest = status -> { - assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); - if (retryCounter.get() > 0) { - assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); - assertThat(entry.getKey(), equalTo(0L)); - assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); - assertNotNull(entry.getValue().getCause()); - assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); - final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); - assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); - assertThat(cause.getShardId().getId(), equalTo(0)); - } - retryCounter.incrementAndGet(); - }; - task.coordinateReads(); - - assertThat(shardChangesRequests.size(), equalTo(11)); - for (long[] shardChangesRequest : shardChangesRequests) { - assertThat(shardChangesRequest[0], equalTo(0L)); - assertThat(shardChangesRequest[1], equalTo(64L)); - } - - assertTrue("task is stopped", task.isStopped()); - assertThat(fatalError, notNullValue()); - assertThat(fatalError.getMessage(), containsString("retrying failed [")); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.numberOfFailedFetches(), equalTo(11L)); - assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); - assertThat(entry.getKey(), equalTo(0L)); - assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); - assertNotNull(entry.getValue().getCause()); - assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); - final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); - assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); - assertThat(cause.getShardId().getId(), equalTo(0)); - assertThat(status.lastRequestedSeqNo(), equalTo(63L)); - assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); - } - public void testReceiveNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -299,7 +248,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertTrue("task is stopped", task.isStopped()); assertThat(fatalError, sameInstance(failure)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.numberOfFailedFetches(), equalTo(1L)); @@ -326,7 +275,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentReads(), 
equalTo(1)); @@ -353,7 +302,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.get(0)[0], equalTo(21L)); assertThat(shardChangesRequests.get(0)[1], equalTo(43L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -376,7 +325,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.size(), equalTo(0)); assertThat(bulkShardOperationRequests.size(), equalTo(0)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -399,7 +348,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -441,7 +390,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); @@ -454,7 +403,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { mappingUpdateFailures.add(new ConnectException()); } @@ -466,7 +415,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(mappingUpdateFailures.size(), equalTo(0)); assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(task.isStopped(), equalTo(false)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); @@ -475,31 +424,6 @@ public class ShardFollowNodeTaskTests extends ESTestCase { } - public void testMappingUpdateRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - int max = randomIntBetween(11, 20); - for (int i = 0; i < max; i++) { - mappingUpdateFailures.add(new ConnectException()); - } - mappingVersions.add(1L); - task.coordinateReads(); - ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L); - task.handleReadResponse(0L, 64L, response); - - assertThat(mappingUpdateFailures.size(), equalTo(max - 11)); - assertThat(mappingVersions.size(), equalTo(1)); - 
assertThat(bulkShardOperationRequests.size(), equalTo(0)); - assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(0L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.lastRequestedSeqNo(), equalTo(63L)); - assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); - } - public void testMappingUpdateNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -511,7 +435,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.size(), equalTo(0)); assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); @@ -535,7 +459,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -553,7 +477,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64))); assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(2)); task = createShardFollowTask(64, 1, 4, Integer.MAX_VALUE, Long.MAX_VALUE); @@ -583,7 +507,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.get(i), equalTo(Arrays.asList(response.getOperations()).subList(offset, offset + 8))); } - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(32)); } @@ -596,7 +520,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } @@ -610,35 +534,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(operations, equalTo(Arrays.asList(response.getOperations()))); } assertThat(task.isStopped(), equalTo(false)); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); - assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); - } - - public void testRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - task.coordinateReads(); - 
assertThat(shardChangesRequests.size(), equalTo(1)); - assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); - assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - - int max = randomIntBetween(11, 32); - for (int i = 0; i < max; i++) { - writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); - } - ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 643); - // Also invokes coordinatesWrites() - task.innerHandleReadResponse(0L, 63L, response); - - // Number of requests is equal to initial request + retried attempts: - assertThat(bulkShardOperationRequests.size(), equalTo(11)); - for (List<Translog.Operation> operations : bulkShardOperationRequests) { - assertThat(operations, equalTo(Arrays.asList(response.getOperations()))); - } - assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } @@ -660,7 +556,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } @@ -704,19 +600,47 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(shardChangesRequests.get(0)[0], equalTo(64L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(63L)); } - ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, int maxConcurrentReadBatches, int maxConcurrentWriteBatches, - int bufferWriteLimit, long maxBatchSizeInBytes) { + public void testComputeDelay() { + long maxDelayInMillis = 1000; + assertThat(ShardFollowNodeTask.computeDelay(0, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(50L))); + assertThat(ShardFollowNodeTask.computeDelay(1, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(50L))); + assertThat(ShardFollowNodeTask.computeDelay(2, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(100L))); + assertThat(ShardFollowNodeTask.computeDelay(3, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(200L))); + assertThat(ShardFollowNodeTask.computeDelay(4, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(400L))); + assertThat(ShardFollowNodeTask.computeDelay(5, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(800L))); + assertThat(ShardFollowNodeTask.computeDelay(6, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + assertThat(ShardFollowNodeTask.computeDelay(7, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + assertThat(ShardFollowNodeTask.computeDelay(8, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + 
assertThat(ShardFollowNodeTask.computeDelay(1024, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + } + + private ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, + int maxConcurrentReadBatches, + int maxConcurrentWriteBatches, + int bufferWriteLimit, + long maxBatchSizeInBytes) { AtomicBoolean stopped = new AtomicBoolean(false); - ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), - new ShardId("leader_index", "", 0), maxBatchOperationCount, maxConcurrentReadBatches, maxBatchSizeInBytes, - maxConcurrentWriteBatches, bufferWriteLimit, TimeValue.ZERO, TimeValue.ZERO, Collections.emptyMap()); + ShardFollowTask params = new ShardFollowTask( + null, + new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), + maxBatchOperationCount, + maxConcurrentReadBatches, + maxBatchSizeInBytes, + maxConcurrentWriteBatches, + bufferWriteLimit, + TimeValue.ZERO, + TimeValue.ZERO, + "uuid", + Collections.emptyMap() + ); shardChangesRequests = new ArrayList<>(); bulkShardOperationRequests = new ArrayList<>(); @@ -777,12 +701,12 @@ public class ShardFollowNodeTaskTests extends ESTestCase { for (int i = 0; i < requestBatchSize; i++) { operations[i] = new Translog.NoOp(from + i, 0, "test"); } - final ShardChangesAction.Response response = - new ShardChangesAction.Response( - mappingVersions.poll(), - leaderGlobalCheckpoints.poll(), - maxSeqNos.poll(), - operations); + final ShardChangesAction.Response response = new ShardChangesAction.Response( + mappingVersions.poll(), + leaderGlobalCheckpoints.poll(), + maxSeqNos.poll(), + operations + ); handler.accept(response); } } @@ -814,7 +738,11 @@ public class ShardFollowNodeTaskTests extends ESTestCase { ops.add(new Translog.Index("doc", id, seqNo, 0, source)); } return new ShardChangesAction.Response( - mappingVersion, leaderGlobalCheckPoint, leaderGlobalCheckPoint, ops.toArray(new Translog.Operation[0])); + mappingVersion, + leaderGlobalCheckPoint, + leaderGlobalCheckPoint, + ops.toArray(new Translog.Operation[0]) + ); } void startTask(ShardFollowNodeTask task, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) {
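Note: the testComputeDelay assertions above pin an exponential backoff with jitter: the delay ceiling starts at 50 ms, roughly doubles per retry, and is clamped at the configured maximum. The real formula lives in ShardFollowNodeTask.computeDelay; the following is only a plausible reading consistent with those asserted bounds (the jitter source and exact growth curve are assumptions):

    import java.util.concurrent.ThreadLocalRandom;

    final class BackoffSketch {
        // Ceiling sequence: 50, 50, 100, 200, 400, 800, then clamped at
        // maxDelayInMillis; the returned delay is uniform jitter in [0, ceiling].
        static long computeDelay(int currentRetry, long maxDelayInMillis) {
            int exponent = Math.min(Math.max(currentRetry - 1, 0), 24); // cap the shift to avoid overflow
            long ceiling = Math.min(maxDelayInMillis, 50L << exponent);
            return ThreadLocalRandom.current().nextLong(ceiling + 1);
        }
    }

Randomizing within the ceiling rather than sleeping the full ceiling spreads retries from many shard-follow tasks so they do not hammer the leader in lockstep.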
leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + + String oldHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + leaderGroup.reinitPrimaryShard(); + leaderGroup.getPrimary().store().bootstrapNewHistory(); + recoverShardFromStore(leaderGroup.getPrimary()); + String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + + assertBusy(() -> { + assertThat(shardFollowTask.isStopped(), is(true)); + assertThat(shardFollowTask.getFailure().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + + "], actual [" + newHistoryUUID + "]")); + }); + } + } + @Override protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { Settings newSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -159,12 +198,23 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest } private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ReplicationGroup followerGroup) { - ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), - new ShardId("leader_index", "", 0), between(1, 64), between(1, 8), Long.MAX_VALUE, between(1, 4), 10240, - TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap()); + ShardFollowTask params = new ShardFollowTask( + null, + new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), + between(1, 64), + between(1, 8), + Long.MAX_VALUE, + between(1, 4), 10240, + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10), + leaderGroup.getPrimary().getHistoryUUID(), + Collections.emptyMap() + ); BiConsumer scheduler = (delay, task) -> threadPool.schedule(delay, ThreadPool.Names.GENERIC, task); AtomicBoolean stopped = new AtomicBoolean(false); + AtomicReference failureHolder = new AtomicReference<>(); LongSet fetchOperations = new LongHashSet(); return new ShardFollowNodeTask( 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { @@ -210,10 +260,14 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest try { final SeqNoStats seqNoStats = indexShard.seqNoStats(); Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, - maxOperationCount, params.getMaxBatchSizeInBytes()); + maxOperationCount, params.getRecordedLeaderIndexHistoryUUID(), params.getMaxBatchSizeInBytes()); // hard code mapping version; this is ok, as mapping updates are not tested here - final ShardChangesAction.Response response = - new ShardChangesAction.Response(1L, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), ops); + 
final ShardChangesAction.Response response = new ShardChangesAction.Response( + 1L, + seqNoStats.getGlobalCheckpoint(), + seqNoStats.getMaxSeqNo(), + ops + ); handler.accept(response); return; } catch (Exception e) { @@ -238,9 +292,14 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest @Override public void markAsFailed(Exception e) { + failureHolder.set(e); stopped.set(true); } + @Override + public Exception getFailure() { + return failureHolder.get(); + } }; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java index 300794a6c00..fa11ddf4bf9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -34,7 +34,9 @@ public class ShardFollowTaskTests extends AbstractSerializingTestCase CUSTOM_METADATA = + singletonMap(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, "uuid"); public void testValidation() throws IOException { FollowIndexAction.Request request = ShardChangesIT.createFollowRequest("index1", "index2"); + String[] UUIDs = new String[]{"uuid"}; { // should fail, because leader index does not exist - Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, null, null, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, null, null, null, null)); assertThat(e.getMessage(), equalTo("leader index [index1] does not exist")); } { // should fail, because follow index does not exist - IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); - Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, null, null)); + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap()); + Exception e = expectThrows(IllegalArgumentException.class, + () -> validate(request, leaderIMD, null, null, null)); assertThat(e.getMessage(), equalTo("follow index [index2] does not exist")); } { - // should fail because leader index does not have soft deletes enabled - IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); - IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY); + // should fail because the recorded leader index history uuid is not equal to the leader actual index history uuid: + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap()); + Map customMetaData = + singletonMap(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, "another-uuid"); + IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, customMetaData); Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + () -> validate(request, leaderIMD, followIMD, UUIDs, null)); + assertThat(e.getMessage(), equalTo("leader shard [index2][0] should reference [another-uuid] as history uuid but " + + "instead reference [uuid] as history uuid")); + } + { + // should fail because leader index does not have soft deletes enabled + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap()); + IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, CUSTOM_METADATA); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); 
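+ // Editorial sketch, not part of the original patch: the history-uuid mismatch
+ // asserted above suggests validate(...) compares, per shard, the uuid recorded in
+ // the follower's CCR custom metadata against the leader's actual history uuids.
+ // recordedUUID and followIndex are hypothetical names:
+ // for (int i = 0; i < UUIDs.length; i++) {
+ //     if (UUIDs[i].equals(recordedUUID) == false) {
+ //         throw new IllegalArgumentException("leader shard [" + followIndex + "][" + i
+ //                 + "] should reference [" + recordedUUID + "] as history uuid but instead reference ["
+ //                 + UUIDs[i] + "] as history uuid");
+ //     }
+ // }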
assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); } { // should fail because the number of primary shards between leader and follow index are not equal IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); - IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); + IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY, CUSTOM_METADATA); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); assertThat(e.getMessage(), equalTo("leader index primary shards [5] does not match with the number of shards of the follow index [4]")); } { // should fail, because leader index is closed IndexMetaData leaderIMD = createIMD("index1", State.CLOSE, "{}", 5, Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), CUSTOM_METADATA); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); assertThat(e.getMessage(), equalTo("leader and follow index must be open")); } { // should fail, because leader has a field with the same name mapped as keyword and follower as text IndexMetaData leaderIMD = createIMD("index1", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"keyword\"}}}", 5, - Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5, - Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); + Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), CUSTOM_METADATA); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); mapperService.updateMapping(null, followIMD); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, mapperService)); assertThat(e.getMessage(), equalTo("mapper [field] of different type, current_type [text], merged_type [keyword]")); } { @@ -81,39 +100,38 @@ public class FollowIndexActionTests extends ESTestCase { IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build()); + 
.put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), CUSTOM_METADATA); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); assertThat(e.getMessage(), equalTo("the leader and follower index settings must be identical")); } { // should fail because the following index does not have the following_index settings IndexMetaData leaderIMD = createIMD("index1", 5, - Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); Settings followingIndexSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build(); - IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings); + IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings, CUSTOM_METADATA); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followingIndexSettings, "index2"); mapperService.updateMapping(null, followIMD); - IllegalArgumentException error = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + IllegalArgumentException error = + expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, mapperService)); assertThat(error.getMessage(), equalTo("the following index [index2] is not ready to follow; " + "the setting [index.xpack.ccr.following_index] must be enabled.")); } { // should succeed IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", 5, Settings.builder() - .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), CUSTOM_METADATA); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, UUIDs, mapperService); } { // should succeed, index settings are identical @@ -121,15 +139,15 @@ public class FollowIndexActionTests extends ESTestCase { IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() 
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), CUSTOM_METADATA); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, UUIDs, mapperService); } { // should succeed despite whitelisted settings being different @@ -138,25 +156,32 @@ public class FollowIndexActionTests extends ESTestCase { .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), emptyMap()); IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), CUSTOM_METADATA); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, UUIDs, mapperService); } } - private static IndexMetaData createIMD(String index, int numberOfShards, Settings settings) throws IOException { - return createIMD(index, State.OPEN, "{\"properties\": {}}", numberOfShards, settings); + private static IndexMetaData createIMD(String index, + int numberOfShards, + Settings settings, + Map custom) throws IOException { + return createIMD(index, State.OPEN, "{\"properties\": {}}", numberOfShards, settings, custom); } - private static IndexMetaData createIMD(String index, State state, String mapping, int numberOfShards, - Settings settings) throws IOException { + private static IndexMetaData createIMD(String index, + State state, + String mapping, + int numberOfShards, + Settings settings, + Map custom) throws IOException { return IndexMetaData.builder(index) .settings(settings(Version.CURRENT).put(settings)) .numberOfShards(numberOfShards) @@ -164,6 +189,7 @@ public class FollowIndexActionTests extends ESTestCase { .numberOfReplicas(0) .setRoutingNumShards(numberOfShards) .putMapping("_doc", mapping) + .putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, custom) .build(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java index d894eda0b11..6e7341154c8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 4069323cd4b..ccd40eb7a5e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -265,13 +265,15 @@ public class XPackLicenseState { } } - private volatile Status status = new Status(OperationMode.TRIAL, true); - private final List listeners = new CopyOnWriteArrayList<>(); + private final List listeners; private final boolean isSecurityEnabled; private final boolean isSecurityExplicitlyEnabled; - private volatile boolean isSecurityEnabledByTrialVersion; + + private Status status = new Status(OperationMode.TRIAL, true); + private boolean isSecurityEnabledByTrialVersion; public XPackLicenseState(Settings settings) { + this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); // 6.0+ requires TLS for production licenses, so if TLS is enabled and security is enabled // we can interpret this as an explicit enabling of security if the security enabled @@ -281,6 +283,14 @@ public class XPackLicenseState { this.isSecurityEnabledByTrialVersion = false; } + private XPackLicenseState(XPackLicenseState xPackLicenseState) { + this.listeners = xPackLicenseState.listeners; + this.isSecurityEnabled = xPackLicenseState.isSecurityEnabled; + this.isSecurityExplicitlyEnabled = xPackLicenseState.isSecurityExplicitlyEnabled; + this.status = xPackLicenseState.status; + this.isSecurityEnabledByTrialVersion = xPackLicenseState.isSecurityEnabledByTrialVersion; + } + /** * Updates the current state of the license, which will change what features are available. * @@ -291,15 +301,17 @@ public class XPackLicenseState { * trial was prior to this metadata being tracked (6.1) */ void update(OperationMode mode, boolean active, @Nullable Version mostRecentTrialVersion) { - status = new Status(mode, active); - if (isSecurityEnabled == true && isSecurityExplicitlyEnabled == false && mode == OperationMode.TRIAL - && isSecurityEnabledByTrialVersion == false) { - // Before 6.3, Trial licenses would default having security enabled. - // If this license was generated before that version, then treat it as if security is explicitly enabled - if (mostRecentTrialVersion == null || mostRecentTrialVersion.before(Version.V_6_3_0)) { - Loggers.getLogger(getClass()).info("Automatically enabling security for older trial license ({})", - mostRecentTrialVersion == null ? "[pre 6.1.0]" : mostRecentTrialVersion.toString()); - isSecurityEnabledByTrialVersion = true; + synchronized (this) { + status = new Status(mode, active); + if (isSecurityEnabled == true && isSecurityExplicitlyEnabled == false && mode == OperationMode.TRIAL + && isSecurityEnabledByTrialVersion == false) { + // Before 6.3, Trial licenses would default having security enabled. 
+ // If this license was generated before that version, then treat it as if security is explicitly enabled + if (mostRecentTrialVersion == null || mostRecentTrialVersion.before(Version.V_6_3_0)) { + Loggers.getLogger(getClass()).info("Automatically enabling security for older trial license ({})", + mostRecentTrialVersion == null ? "[pre 6.1.0]" : mostRecentTrialVersion.toString()); + isSecurityEnabledByTrialVersion = true; + } } } listeners.forEach(Runnable::run); @@ -316,12 +328,12 @@ public class XPackLicenseState { } /** Return the current license type. */ - public OperationMode getOperationMode() { + public synchronized OperationMode getOperationMode() { return status.mode; } /** Return true if the license is currently within its time boundaries, false otherwise. */ - public boolean isActive() { + public synchronized boolean isActive() { return status.active; } @@ -329,28 +341,32 @@ public class XPackLicenseState { * @return true if authentication and authorization should be enabled. this does not indicate what realms are available * @see #allowedRealmType() for the enabled realms */ - public boolean isAuthAllowed() { + public synchronized boolean isAuthAllowed() { OperationMode mode = status.mode; - return mode == OperationMode.STANDARD || mode == OperationMode.GOLD || mode == OperationMode.PLATINUM - || mode == OperationMode.TRIAL; + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.STANDARD || mode == OperationMode.GOLD + || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } /** * @return true if IP filtering should be enabled */ - public boolean isIpFilteringAllowed() { + public synchronized boolean isIpFilteringAllowed() { OperationMode mode = status.mode; - return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM - || mode == OperationMode.TRIAL; + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } /** * @return true if auditing should be enabled */ - public boolean isAuditingAllowed() { + public synchronized boolean isAuditingAllowed() { OperationMode mode = status.mode; - return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM - || mode == OperationMode.TRIAL; + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); } /** @@ -359,7 +375,7 @@ public class XPackLicenseState { * * @return true if the license allows for the stats and health APIs to be used. */ - public boolean isStatsAndHealthAllowed() { + public synchronized boolean isStatsAndHealthAllowed() { return status.active; } @@ -375,9 +391,11 @@ public class XPackLicenseState { * * @return {@code true} to enable DLS and FLS. Otherwise {@code false}. 
*/ - public boolean isDocumentAndFieldLevelSecurityAllowed() { + public synchronized boolean isDocumentAndFieldLevelSecurityAllowed() { OperationMode mode = status.mode; - return mode == OperationMode.TRIAL || mode == OperationMode.PLATINUM; + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.TRIAL || mode == OperationMode.PLATINUM); } /** Classes of realms that may be available based on the license type. */ @@ -391,37 +409,45 @@ public class XPackLicenseState { /** * @return the type of realms that are enabled based on the license {@link OperationMode} */ - public AllowedRealmType allowedRealmType() { - switch (status.mode) { - case PLATINUM: - case TRIAL: - return AllowedRealmType.ALL; - case GOLD: - return AllowedRealmType.DEFAULT; - case STANDARD: - return AllowedRealmType.NATIVE; - default: - return AllowedRealmType.NONE; + public synchronized AllowedRealmType allowedRealmType() { + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + if (isSecurityCurrentlyEnabled) { + switch (status.mode) { + case PLATINUM: + case TRIAL: + return AllowedRealmType.ALL; + case GOLD: + return AllowedRealmType.DEFAULT; + case STANDARD: + return AllowedRealmType.NATIVE; + default: + return AllowedRealmType.NONE; + } + } else { + return AllowedRealmType.NONE; } } /** * @return whether custom role providers are allowed based on the license {@link OperationMode} */ - public boolean isCustomRoleProvidersAllowed() { - final Status localStatus = status; - return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) - && localStatus.active; + public synchronized boolean isCustomRoleProvidersAllowed() { + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL) + && status.active; } /** * @return whether "authorization_realms" are allowed based on the license {@link OperationMode} * @see org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings */ - public boolean isAuthorizationRealmAllowed() { - final Status localStatus = status; - return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL) - && localStatus.active; + public synchronized boolean isAuthorizationRealmAllowed() { + final boolean isSecurityCurrentlyEnabled = + isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL) + && status.active; } /** @@ -437,8 +463,7 @@ public class XPackLicenseState { * * @return {@code true} as long as the license is valid. Otherwise {@code false}. 
*/ - public boolean isWatcherAllowed() { - // status is volatile, so a local variable is used for a consistent view + public synchronized boolean isWatcherAllowed() { Status localStatus = status; if (localStatus.active == false) { @@ -461,7 +486,7 @@ public class XPackLicenseState { * * @return true if the license is active */ - public boolean isMonitoringAllowed() { + public synchronized boolean isMonitoringAllowed() { return status.active; } @@ -471,7 +496,7 @@ public class XPackLicenseState { * @return {@link #isWatcherAllowed()} * @see #isWatcherAllowed() */ - public boolean isMonitoringClusterAlertsAllowed() { + public synchronized boolean isMonitoringClusterAlertsAllowed() { return isWatcherAllowed(); } @@ -484,7 +509,7 @@ public class XPackLicenseState { * * @return {@code true} if the user is allowed to modify the retention. Otherwise {@code false}. */ - public boolean isUpdateRetentionAllowed() { + public synchronized boolean isUpdateRetentionAllowed() { final OperationMode mode = status.mode; return mode != OperationMode.BASIC && mode != OperationMode.MISSING; } @@ -500,8 +525,7 @@ public class XPackLicenseState { * * @return {@code true} as long as the license is valid. Otherwise {@code false}. */ - public boolean isGraphAllowed() { - // status is volatile + public synchronized boolean isGraphAllowed() { Status localStatus = status; OperationMode operationMode = localStatus.mode; @@ -523,8 +547,7 @@ public class XPackLicenseState { * @return {@code true} as long as the license is valid. Otherwise * {@code false}. */ - public boolean isMachineLearningAllowed() { - // one-time volatile read as status could be updated on us while performing this check + public synchronized boolean isMachineLearningAllowed() { final Status currentStatus = status; return currentStatus.active && isMachineLearningAllowedForOperationMode(currentStatus.mode); } @@ -538,7 +561,7 @@ public class XPackLicenseState { * * @return true if the license is active */ - public boolean isRollupAllowed() { + public synchronized boolean isRollupAllowed() { return status.active; } @@ -546,7 +569,7 @@ public class XPackLicenseState { * Logstash is allowed as long as there is an active license of type TRIAL, STANDARD, GOLD or PLATINUM * @return {@code true} as long as there is a valid license */ - public boolean isLogstashAllowed() { + public synchronized boolean isLogstashAllowed() { Status localStatus = status; return localStatus.active && (isBasic(localStatus.mode) == false); } @@ -555,7 +578,7 @@ public class XPackLicenseState { * Beats is allowed as long as there is an active license of type TRIAL, STANDARD, GOLD or PLATINUM * @return {@code true} as long as there is a valid license */ - public boolean isBeatsAllowed() { + public synchronized boolean isBeatsAllowed() { Status localStatus = status; return localStatus.active && (isBasic(localStatus.mode) == false); @@ -565,7 +588,7 @@ public class XPackLicenseState { * Deprecation APIs are always allowed as long as there is an active license * @return {@code true} as long as there is a valid license */ - public boolean isDeprecationAllowed() { + public synchronized boolean isDeprecationAllowed() { return status.active; } @@ -577,11 +600,9 @@ public class XPackLicenseState { * @return {@code true} as long as the license is valid. Otherwise * {@code false}. 
*/ - public boolean isUpgradeAllowed() { - // status is volatile - Status localStatus = status; + public synchronized boolean isUpgradeAllowed() { // Should work on all active licenses - return localStatus.active; + return status.active; } /** @@ -605,7 +626,7 @@ public class XPackLicenseState { *
<p>
* SQL is available for all license types except {@link OperationMode#MISSING} */ - public boolean isSqlAllowed() { + public synchronized boolean isSqlAllowed() { return status.active; } @@ -614,8 +635,7 @@ public class XPackLicenseState { *
<p>
* JDBC is available only in for {@link OperationMode#PLATINUM} and {@link OperationMode#TRIAL} licences */ - public boolean isJdbcAllowed() { - // status is volatile + public synchronized boolean isJdbcAllowed() { Status localStatus = status; OperationMode operationMode = localStatus.mode; @@ -624,18 +644,35 @@ public class XPackLicenseState { return licensed && localStatus.active; } - public boolean isTrialLicense() { + public synchronized boolean isTrialLicense() { return status.mode == OperationMode.TRIAL; } - public boolean isSecurityAvailable() { + /** + * @return true if security is available to be used with the current license type + */ + public synchronized boolean isSecurityAvailable() { OperationMode mode = status.mode; return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.STANDARD || mode == OperationMode.TRIAL; } - public boolean isSecurityEnabled() { - final OperationMode mode = status.mode; + /** + * @return true if security has been disabled by a trial license which is the case of the + * default distribution post 6.3.0. The conditions necessary for this are: + *
<ul>
+ * <li>A trial license generated in 6.3.0+</li>
+ * <li>xpack.security.enabled not specified as a setting</li>
+ * </ul>
+ */ + public synchronized boolean isSecurityDisabledByTrialLicense() { + return status.mode == OperationMode.TRIAL && isSecurityEnabled + && isSecurityExplicitlyEnabled == false + && isSecurityEnabledByTrialVersion == false; + } + + private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, + final boolean isSecurityEnabledByTrialVersion, final boolean isSecurityEnabled) { return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled; } @@ -650,8 +687,7 @@ public class XPackLicenseState { * * @return true is the license is compatible, otherwise false */ - public boolean isCcrAllowed() { - // one-time volatile read as status could be updated on us while performing this check + public synchronized boolean isCcrAllowed() { final Status currentStatus = status; return currentStatus.active && isCcrAllowedForOperationMode(currentStatus.mode); } @@ -664,4 +700,14 @@ public class XPackLicenseState { return operationMode == OperationMode.PLATINUM || operationMode == OperationMode.TRIAL; } + /** + * Creates a copy of this object based on the state at the time the method was called. The + * returned object will not be modified by a license update/expiration so it can be used to + * make multiple method calls on the license state safely. This object should not be long + * lived but instead used within a method when a consistent view of the license state + * is needed for multiple interactions with the license state. + */ + public synchronized XPackLicenseState copyCurrentLicenseState() { + return new XPackLicenseState(this); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SeqIdGeneratingFilterReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SeqIdGeneratingFilterReader.java new file mode 100644 index 00000000000..8dd5d9d98ca --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SeqIdGeneratingFilterReader.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.Terms; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.VersionFieldMapper; + +import java.io.IOException; +import java.util.IdentityHashMap; +import java.util.Map; + +/** + * This filter reader fakes sequence ID, primary term and version + * for a source only index. 
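+ * <p>
+ * Editorial sketch, an assumption rather than part of the original patch: the
+ * fabricated doc values below behave as
+ * <pre>
+ * seqNo   = docBase + docID; // _seq_no derived from the Lucene doc id
+ * term    = primaryTerm;     // one primary term for the whole reader
+ * version = 1;               // constant synthetic _version
+ * </pre>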
+ */ +final class SeqIdGeneratingFilterReader extends FilterDirectoryReader { + private final long primaryTerm; + + private SeqIdGeneratingFilterReader(DirectoryReader in, SeqIdGeneratingSubReaderWrapper wrapper) throws IOException { + super(in, wrapper); + primaryTerm = wrapper.primaryTerm; + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return wrap(in, primaryTerm); + } + + static DirectoryReader wrap(DirectoryReader in, long primaryTerm) throws IOException { + Map ctxMap = new IdentityHashMap<>(); + for (LeafReaderContext leave : in.leaves()) { + ctxMap.put(leave.reader(), leave); + } + return new SeqIdGeneratingFilterReader(in, new SeqIdGeneratingSubReaderWrapper(ctxMap, primaryTerm)); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + + private abstract static class FakeNumericDocValues extends NumericDocValues { + private final int maxDoc; + int docID = -1; + + FakeNumericDocValues(int maxDoc) { + this.maxDoc = maxDoc; + } + + @Override + public int docID() { + return docID; + } + + @Override + public int nextDoc() { + if (docID+1 < maxDoc) { + docID++; + } else { + docID = NO_MORE_DOCS; + } + return docID; + } + + @Override + public int advance(int target) { + if (target >= maxDoc) { + docID = NO_MORE_DOCS; + } else { + docID = target; + } + return docID; + } + + @Override + public long cost() { + return maxDoc; + } + + @Override + public boolean advanceExact(int target) { + advance(target); + return docID != NO_MORE_DOCS; + } + } + + private static class SeqIdGeneratingSubReaderWrapper extends SubReaderWrapper { + private final Map ctxMap; + private final long primaryTerm; + + SeqIdGeneratingSubReaderWrapper(Map ctxMap, long primaryTerm) { + this.ctxMap = ctxMap; + this.primaryTerm = primaryTerm; + } + + @Override + public LeafReader wrap(LeafReader reader) { + LeafReaderContext leafReaderContext = ctxMap.get(reader); + final int docBase = leafReaderContext.docBase; + return new FilterLeafReader(reader) { + + @Override + public NumericDocValues getNumericDocValues(String field) throws IOException { + if (SeqNoFieldMapper.NAME.equals(field)) { + return new FakeNumericDocValues(maxDoc()) { + @Override + public long longValue() { + return docBase + docID; + } + }; + } else if (SeqNoFieldMapper.PRIMARY_TERM_NAME.equals(field)) { + return new FakeNumericDocValues(maxDoc()) { + @Override + public long longValue() { + return primaryTerm; + } + }; + } else if (VersionFieldMapper.NAME.equals(field)) { + return new FakeNumericDocValues(maxDoc()) { + @Override + public long longValue() { + return 1; + } + }; + } + return super.getNumericDocValues(field); + } + + @Override + public CacheHelper getCoreCacheHelper() { + return reader.getCoreCacheHelper(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return reader.getReaderCacheHelper(); + } + + @Override + public Terms terms(String field) { + throw new UnsupportedOperationException("_source only indices can't be searched or filtered"); + } + + @Override + public PointValues getPointValues(String field) { + throw new UnsupportedOperationException("_source only indices can't be searched or filtered"); + } + }; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java new file mode 100644 index 00000000000..b7d6a51f45a --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java @@ -0,0 +1,261 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.index.StandardDirectoryReader; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.TrackingDirectoryWrapper; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.FIELDS_EXTENSION; +import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.FIELDS_INDEX_EXTENSION; + +public class SourceOnlySnapshot { + private final Directory targetDirectory; + private final Supplier deleteByQuerySupplier; + + public SourceOnlySnapshot(Directory targetDirectory, Supplier deleteByQuerySupplier) { + this.targetDirectory = targetDirectory; + this.deleteByQuerySupplier = deleteByQuerySupplier; + } + + public SourceOnlySnapshot(Directory targetDirectory) { + this(targetDirectory, null); + } + + public synchronized List syncSnapshot(IndexCommit commit) throws IOException { + long generation; + Map existingSegments = new HashMap<>(); + if (Lucene.indexExists(targetDirectory)) { + SegmentInfos existingsSegmentInfos = Lucene.readSegmentInfos(targetDirectory); + for (SegmentCommitInfo info : existingsSegmentInfos) { + existingSegments.put(new BytesRef(info.info.getId()), info); + } + generation = existingsSegmentInfos.getGeneration(); + } else { + generation = 1; + } + List createdFiles = new ArrayList<>(); + String segmentFileName; + try (Lock writeLock = targetDirectory.obtainLock(IndexWriter.WRITE_LOCK_NAME); + StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(commit)) { + SegmentInfos 
segmentInfos = reader.getSegmentInfos(); + DirectoryReader wrapper = wrapReader(reader); + List newInfos = new ArrayList<>(); + for (LeafReaderContext ctx : wrapper.leaves()) { + SegmentCommitInfo info = segmentInfos.info(ctx.ord); + LeafReader leafReader = ctx.reader(); + LiveDocs liveDocs = getLiveDocs(leafReader); + if (leafReader.numDocs() != 0) { // fully deleted segments don't need to be processed + SegmentCommitInfo newInfo = syncSegment(info, liveDocs, leafReader.getFieldInfos(), existingSegments, createdFiles); + newInfos.add(newInfo); + } + } + segmentInfos.clear(); + segmentInfos.addAll(newInfos); + segmentInfos.setNextWriteGeneration(Math.max(segmentInfos.getGeneration(), generation)+1); + String pendingSegmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, + "", segmentInfos.getGeneration()); + try (IndexOutput segnOutput = targetDirectory.createOutput(pendingSegmentFileName, IOContext.DEFAULT)) { + segmentInfos.write(targetDirectory, segnOutput); + } + targetDirectory.sync(Collections.singleton(pendingSegmentFileName)); + targetDirectory.sync(createdFiles); + segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", segmentInfos.getGeneration()); + targetDirectory.rename(pendingSegmentFileName, segmentFileName); + } + Lucene.pruneUnreferencedFiles(segmentFileName, targetDirectory); + assert assertCheckIndex(); + return Collections.unmodifiableList(createdFiles); + } + + private LiveDocs getLiveDocs(LeafReader reader) throws IOException { + if (deleteByQuerySupplier != null) { + // we have this additional delete by query functionality to filter out documents before we snapshot them + // we can't filter after the fact since we don't have an index anymore. + Query query = deleteByQuerySupplier.get(); + IndexSearcher s = new IndexSearcher(reader); + s.setQueryCache(null); + Query rewrite = s.rewrite(query); + Weight weight = s.createWeight(rewrite, ScoreMode.COMPLETE_NO_SCORES, 1.0f); + Scorer scorer = weight.scorer(reader.getContext()); + if (scorer != null) { + DocIdSetIterator iterator = scorer.iterator(); + if (iterator != null) { + Bits liveDocs = reader.getLiveDocs(); + final FixedBitSet bits; + if (liveDocs != null) { + bits = FixedBitSet.copyOf(liveDocs); + } else { + bits = new FixedBitSet(reader.maxDoc()); + bits.set(0, reader.maxDoc()); + } + int newDeletes = apply(iterator, bits); + if (newDeletes != 0) { + int numDeletes = reader.numDeletedDocs() + newDeletes; + return new LiveDocs(numDeletes, bits); + } + } + } + } + return new LiveDocs(reader.numDeletedDocs(), reader.getLiveDocs()); + } + + private int apply(DocIdSetIterator iterator, FixedBitSet bits) throws IOException { + int docID = -1; + int newDeletes = 0; + while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (bits.get(docID)) { + bits.clear(docID); + newDeletes++; + } + } + return newDeletes; + } + + + private boolean assertCheckIndex() throws IOException { + ByteArrayOutputStream output = new ByteArrayOutputStream(1024); + try (CheckIndex checkIndex = new CheckIndex(targetDirectory)) { + checkIndex.setFailFast(true); + checkIndex.setInfoStream(new PrintStream(output, false, IOUtils.UTF_8), false); + CheckIndex.Status status = checkIndex.checkIndex(); + if (status == null || status.clean == false) { + throw new RuntimeException("CheckIndex failed: " + output.toString(IOUtils.UTF_8)); + } + return true; + } + } + + DirectoryReader wrapReader(DirectoryReader reader) throws IOException { + String softDeletesField = null; + for 
(LeafReaderContext ctx : reader.leaves()) { + String field = ctx.reader().getFieldInfos().getSoftDeletesField(); + if (field != null) { + softDeletesField = field; + break; + } + } + return softDeletesField == null ? reader : new SoftDeletesDirectoryReaderWrapper(reader, softDeletesField); + } + + private SegmentCommitInfo syncSegment(SegmentCommitInfo segmentCommitInfo, LiveDocs liveDocs, FieldInfos fieldInfos, + Map existingSegments, List createdFiles) throws IOException { + SegmentInfo si = segmentCommitInfo.info; + Codec codec = si.getCodec(); + final String segmentSuffix = ""; + SegmentCommitInfo newInfo; + final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(targetDirectory); + BytesRef segmentId = new BytesRef(si.getId()); + boolean exists = existingSegments.containsKey(segmentId); + if (exists == false) { + SegmentInfo newSegmentInfo = new SegmentInfo(si.dir, si.getVersion(), si.getMinVersion(), si.name, si.maxDoc(), false, + si.getCodec(), si.getDiagnostics(), si.getId(), si.getAttributes(), null); + // we drop the sort on purpose since the field we sorted on doesn't exist in the target index anymore. + newInfo = new SegmentCommitInfo(newSegmentInfo, 0, 0, -1, -1, -1); + List fieldInfoCopy = new ArrayList<>(fieldInfos.size()); + for (FieldInfo fieldInfo : fieldInfos) { + fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number, + false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, + fieldInfo.isSoftDeletesField())); + } + FieldInfos newFieldInfos = new FieldInfos(fieldInfoCopy.toArray(new FieldInfo[0])); + codec.fieldInfosFormat().write(trackingDir, newSegmentInfo, segmentSuffix, newFieldInfos, IOContext.DEFAULT); + newInfo.setFieldInfosFiles(trackingDir.getCreatedFiles()); + String idxFile = IndexFileNames.segmentFileName(newSegmentInfo.name, segmentSuffix, FIELDS_INDEX_EXTENSION); + String dataFile = IndexFileNames.segmentFileName(newSegmentInfo.name, segmentSuffix, FIELDS_EXTENSION); + Directory sourceDir = newSegmentInfo.dir; + if (si.getUseCompoundFile()) { + sourceDir = codec.compoundFormat().getCompoundReader(sourceDir, si, IOContext.DEFAULT); + } + trackingDir.copyFrom(sourceDir, idxFile, idxFile, IOContext.DEFAULT); + trackingDir.copyFrom(sourceDir, dataFile, dataFile, IOContext.DEFAULT); + if (sourceDir != newSegmentInfo.dir) { + sourceDir.close(); + } + } else { + newInfo = existingSegments.get(segmentId); + assert newInfo.info.getUseCompoundFile() == false; + } + if (liveDocs.bits != null && liveDocs.numDeletes != 0 && liveDocs.numDeletes != newInfo.getDelCount()) { + if (newInfo.getDelCount() != 0) { + assert assertLiveDocs(liveDocs.bits, liveDocs.numDeletes); + } + codec.liveDocsFormat().writeLiveDocs(liveDocs.bits, trackingDir, newInfo, liveDocs.numDeletes - newInfo.getDelCount(), + IOContext.DEFAULT); + SegmentCommitInfo info = new SegmentCommitInfo(newInfo.info, liveDocs.numDeletes, 0, newInfo.getNextDelGen(), -1, -1); + info.setFieldInfosFiles(newInfo.getFieldInfosFiles()); + info.info.setFiles(trackingDir.getCreatedFiles()); + newInfo = info; + } + if (exists == false) { + newInfo.info.setFiles(trackingDir.getCreatedFiles()); + codec.segmentInfoFormat().write(trackingDir, newInfo.info, IOContext.DEFAULT); + } + createdFiles.addAll(trackingDir.getCreatedFiles()); + return newInfo; + } + + private boolean assertLiveDocs(Bits liveDocs, int deletes) { + int actualDeletes = 0; + for (int i = 0; i < liveDocs.length(); i++ ) { + if (liveDocs.get(i) == false) { + actualDeletes++; + } + } + assert 
actualDeletes == deletes : " actual: " + actualDeletes + " deletes: " + deletes; + return true; + } + + private static class LiveDocs { + final int numDeletes; + final Bits bits; + + LiveDocs(int numDeletes, Bits bits) { + this.numDeletes = numDeletes; + this.bits = bits; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java new file mode 100644 index 00000000000..a75d5f488ee --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.ShardLock; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.ReadOnlyEngine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.repositories.FilterRepository; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.Repository; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + *
<p>
+ * This is a filter snapshot repository that only snapshots the minimal required information + * that is needed to recreate the index. In other words, instead of snapshotting the entire shard + * with all its lucene indexed fields, doc values, points etc. it only snapshots the stored + * fields including _source and _routing as well as the live docs in order to distinguish between + * live and deleted docs. + *
</p>
+ *
<p>
+ * The repository can wrap any other repository, delegating the source only snapshot to it and reading + * from it. For instance, a file repository of type fs can be used by passing settings.delegate_type=fs + * at repository creation time. + *
</p>
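+ * <p>
+ * Editorial usage sketch, not part of the original patch; the repository type name
+ * "source" and the repository name are assumptions, and typeLookup stands for the
+ * factory lookup passed to {@code Repository.Factory#create}:
+ * <pre>
+ * Settings settings = Settings.builder()
+ *     .put("delegate_type", "fs")
+ *     .put("location", "/mnt/backups")
+ *     .build();
+ * Repository repository = SourceOnlySnapshotRepository.newRepositoryFactory()
+ *     .create(new RepositoryMetaData("my_src_only", "source", settings), typeLookup);
+ * </pre>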
+ * Snapshots restored from source only snapshots are minimal indices that are read-only and only allow + * match_all scroll searches in order to reindex the data. + */ +public final class SourceOnlySnapshotRepository extends FilterRepository { + private static final Setting DELEGATE_TYPE = new Setting<>("delegate_type", "", Function.identity(), Setting.Property + .NodeScope); + public static final Setting SOURCE_ONLY = Setting.boolSetting("index.source_only", false, Setting + .Property.IndexScope, Setting.Property.Final, Setting.Property.PrivateIndex); + + private static final String SNAPSHOT_DIR_NAME = "_snapshot"; + + SourceOnlySnapshotRepository(Repository in) { + super(in); + } + + @Override + public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData) { + // we process the index metadata at snapshot time. This means if somebody tries to restore + // a _source only snapshot with a plain repository it will be just fine since we already set the + // required engine, that the index is read-only and the mapping to a default mapping + try { + MetaData.Builder builder = MetaData.builder(metaData); + for (IndexId indexId : indices) { + IndexMetaData index = metaData.index(indexId.getName()); + IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); + // for a minimal restore we basically disable indexing on all fields and only create an index + // that is valid from an operational perspective. ie. it will have all metadata fields like version/ + // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. + ImmutableOpenMap mappings = index.getMappings(); + Iterator> iterator = mappings.iterator(); + while (iterator.hasNext()) { + ObjectObjectCursor next = iterator.next(); + // we don't need to obey any routing here stuff is read-only anyway and get is disabled + final String mapping = "{ \"" + next.key + "\": { \"enabled\": false, \"_meta\": " + next.value.source().string() + + " } }"; + indexMetadataBuilder.putMapping(next.key, mapping); + } + indexMetadataBuilder.settings(Settings.builder().put(index.getSettings()) + .put(SOURCE_ONLY.getKey(), true) + .put("index.blocks.write", true)); // read-only! + builder.put(indexMetadataBuilder); + } + super.initializeSnapshot(snapshotId, indices, builder.build()); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + @Override + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + if (shard.mapperService().documentMapper() != null // if there is no mapping this is null + && shard.mapperService().documentMapper().sourceMapper().isComplete() == false) { + throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + + "or filters the source"); + } + ShardPath shardPath = shard.shardPath(); + Path dataPath = shardPath.getDataPath(); + // TODO should we have a snapshot tmp directory per shard that is maintained by the system? + Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME); + try (FSDirectory directory = new SimpleFSDirectory(snapPath)) { + Store tempStore = new Store(store.shardId(), store.indexSettings(), directory, new ShardLock(store.shardId()) { + @Override + protected void closeInternal() { + // do nothing; + } + }, Store.OnClose.EMPTY); + Supplier querySupplier = shard.mapperService().hasNested() ? 
Queries::newNestedFilter : null; + // SourceOnlySnapshot will take care of soft- and hard-deletes no special casing needed here + SourceOnlySnapshot snapshot = new SourceOnlySnapshot(tempStore.directory(), querySupplier); + snapshot.syncSnapshot(snapshotIndexCommit); + // we will use the lucene doc ID as the seq ID so we set the local checkpoint to maxDoc with a new index UUID + SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + tempStore.bootstrapNewHistory(segmentInfos.totalMaxDoc()); + store.incRef(); + try (DirectoryReader reader = DirectoryReader.open(tempStore.directory())) { + IndexCommit indexCommit = reader.getIndexCommit(); + super.snapshotShard(shard, tempStore, snapshotId, indexId, indexCommit, snapshotStatus); + } finally { + store.decRef(); + } + } catch (IOException e) { + // why on earth does this super method not declare IOException + throw new UncheckedIOException(e); + } + } + + /** + * Returns an {@link EngineFactory} for the source only snapshots. + */ + public static EngineFactory getEngineFactory() { + return config -> new ReadOnlyEngine(config, null, new TranslogStats(0, 0, 0, 0, 0), true, + reader -> { + try { + return SeqIdGeneratingFilterReader.wrap(reader, config.getPrimaryTermSupplier().getAsLong()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } + + /** + * Returns a new source only repository factory + */ + public static Repository.Factory newRepositoryFactory() { + return new Repository.Factory() { + + @Override + public Repository create(RepositoryMetaData metadata) { + throw new UnsupportedOperationException(); + } + + @Override + public Repository create(RepositoryMetaData metaData, Function typeLookup) throws Exception { + String delegateType = DELEGATE_TYPE.get(metaData.settings()); + if (Strings.hasLength(delegateType) == false) { + throw new IllegalArgumentException(DELEGATE_TYPE.getKey() + " must be set"); + } + Repository.Factory factory = typeLookup.apply(delegateType); + return new SourceOnlySnapshotRepository(factory.create(new RepositoryMetaData(metaData.name(), + delegateType, metaData.settings()), typeLookup)); + } + }; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index da879e87c54..db8981055d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -13,6 +13,7 @@ import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; @@ -21,6 +22,7 @@ import org.elasticsearch.xpack.core.watcher.client.WatcherClient; import java.util.Collections; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -29,6 +31,7 @@ public class XPackClient { private final Client client; + private final CcrClient 
     private final LicensingClient licensingClient;
     private final MonitoringClient monitoringClient;
     private final SecurityClient securityClient;
@@ -37,7 +40,8 @@ public class XPackClient {
     private final ILMClient ilmClient;
 
     public XPackClient(Client client) {
-        this.client = client;
+        this.client = Objects.requireNonNull(client, "client");
+        this.ccrClient = new CcrClient(client);
         this.licensingClient = new LicensingClient(client);
         this.monitoringClient = new MonitoringClient(client);
         this.securityClient = new SecurityClient(client);
@@ -50,6 +54,10 @@ public class XPackClient {
         return client;
     }
 
+    public CcrClient ccr() {
+        return ccrClient;
+    }
+
     public LicensingClient licensing() {
         return licensingClient;
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index aaa3effcfe8..ca76e71e052 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -31,21 +31,28 @@ import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.license.LicenseService;
 import org.elasticsearch.license.LicensesMetaData;
 import org.elasticsearch.license.Licensing;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.plugins.EnginePlugin;
 import org.elasticsearch.plugins.ExtensiblePlugin;
+import org.elasticsearch.plugins.RepositoryPlugin;
 import org.elasticsearch.plugins.ScriptPlugin;
+import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.snapshots.SourceOnlySnapshotRepository;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.core.action.TransportXPackInfoAction;
@@ -67,13 +74,15 @@ import java.security.PrivilegedAction;
 import java.time.Clock;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
-public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin {
+public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin, RepositoryPlugin, EnginePlugin {
 
     private static Logger logger = ESLoggerFactory.getLogger(XPackPlugin.class);
     private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
@@ -340,4 +349,23 @@ public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, Exte
         }
     }
 
+    @Override
+    public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
+        return Collections.singletonMap("source", SourceOnlySnapshotRepository.newRepositoryFactory());
+    }
+
+    @Override
+    public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) {
+        if (indexSettings.getValue(SourceOnlySnapshotRepository.SOURCE_ONLY)) {
+            return Optional.of(SourceOnlySnapshotRepository.getEngineFactory());
+        }
+        return Optional.empty();
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        List<Setting<?>> settings = super.getSettings();
+        settings.add(SourceOnlySnapshotRepository.SOURCE_ONLY);
+        return settings;
+    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
index 31cfe239dd8..111d8a9a68c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
@@ -36,6 +36,12 @@ public class XPackSettings {
         throw new IllegalStateException("Utility class should not be instantiated");
     }
 
+
+    /**
+     * Setting for controlling whether or not CCR is enabled.
+     */
+    public static final Setting<Boolean> CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope);
+
     /** Setting for enabling or disabling security. Defaults to true. */
     public static final Setting<Boolean> SECURITY_ENABLED = Setting.boolSetting("xpack.security.enabled", true,
         Setting.Property.NodeScope);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
index 244a5d441d9..71fd13d0b50 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
@@ -169,7 +169,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
         public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes");
         public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches");
         public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size");
-        public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout");
+        public static final ParseField MAX_RETRY_DELAY = new ParseField("retry_timeout");
         public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay");
 
         @SuppressWarnings("unchecked")
@@ -187,8 +187,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
             PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES);
             PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE);
             PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
-                (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()),
-                RETRY_TIMEOUT, ObjectParser.ValueType.STRING);
+                (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()),
+                MAX_RETRY_DELAY, ObjectParser.ValueType.STRING);
             PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
                 (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()),
                 IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING);
@@ -201,12 +201,12 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
         private final Long
maxOperationSizeInBytes; private final Integer maxConcurrentWriteBatches; private final Integer maxWriteBufferSize; - private final TimeValue retryTimeout; + private final TimeValue maxRetryDelay; private final TimeValue idleShardRetryDelay; public AutoFollowPattern(List leaderIndexPatterns, String followIndexPattern, Integer maxBatchOperationCount, Integer maxConcurrentReadBatches, Long maxOperationSizeInBytes, Integer maxConcurrentWriteBatches, - Integer maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay) { + Integer maxWriteBufferSize, TimeValue maxRetryDelay, TimeValue idleShardRetryDelay) { this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; this.maxBatchOperationCount = maxBatchOperationCount; @@ -214,7 +214,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i this.maxOperationSizeInBytes = maxOperationSizeInBytes; this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; this.maxWriteBufferSize = maxWriteBufferSize; - this.retryTimeout = retryTimeout; + this.maxRetryDelay = maxRetryDelay; this.idleShardRetryDelay = idleShardRetryDelay; } @@ -226,7 +226,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i maxOperationSizeInBytes = in.readOptionalLong(); maxConcurrentWriteBatches = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalVInt(); - retryTimeout = in.readOptionalTimeValue(); + maxRetryDelay = in.readOptionalTimeValue(); idleShardRetryDelay = in.readOptionalTimeValue(); } @@ -266,8 +266,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i return maxWriteBufferSize; } - public TimeValue getRetryTimeout() { - return retryTimeout; + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; } public TimeValue getIdleShardRetryDelay() { @@ -283,7 +283,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i out.writeOptionalLong(maxOperationSizeInBytes); out.writeOptionalVInt(maxConcurrentWriteBatches); out.writeOptionalVInt(maxWriteBufferSize); - out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(idleShardRetryDelay); } @@ -308,8 +308,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i if (maxWriteBufferSize != null){ builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); } - if (retryTimeout != null) { - builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout); + if (maxRetryDelay != null) { + builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay); } if (idleShardRetryDelay != null) { builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay); @@ -334,7 +334,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i Objects.equals(maxOperationSizeInBytes, that.maxOperationSizeInBytes) && Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && - Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay); } @@ -348,7 +348,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i maxOperationSizeInBytes, maxConcurrentWriteBatches, maxWriteBufferSize, - retryTimeout, + maxRetryDelay, idleShardRetryDelay ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java new file mode 100644 index 00000000000..dafb4a5e29f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java @@ -0,0 +1,525 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +public class ShardFollowNodeTaskStatus implements Task.Status { + + public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; + + private static final ParseField LEADER_INDEX = new ParseField("leader_index"); + private static final ParseField FOLLOWER_INDEX = new ParseField("follower_index"); + private static final ParseField SHARD_ID = new ParseField("shard_id"); + private static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); + private static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no"); + private static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); + private static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no"); + private static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no"); + private static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); + private static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); + private static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); + private static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); + private static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); + private static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); + private static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); + private static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received"); + private static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes"); + private static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis"); + private static final ParseField 
NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); + private static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); + private static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); + private static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); + private static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser STATUS_PARSER = + new ConstructingObjectParser<>( + STATUS_PARSER_NAME, + args -> new ShardFollowNodeTaskStatus( + (String) args[0], + (String) args[1], + (int) args[2], + (long) args[3], + (long) args[4], + (long) args[5], + (long) args[6], + (long) args[7], + (int) args[8], + (int) args[9], + (int) args[10], + (long) args[11], + (long) args[12], + (long) args[13], + (long) args[14], + (long) args[15], + (long) args[16], + (long) args[17], + (long) args[18], + (long) args[19], + (long) args[20], + new TreeMap<>( + ((List>) args[21]) + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), + (long) args[22])); + + public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; + + static final ConstructingObjectParser, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = + new ConstructingObjectParser<>( + FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, + args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1])); + + static { + STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); + STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOWER_INDEX); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), 
NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); + STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); + } + + static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); + static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); + + static { + FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + } + + private final String leaderIndex; + + public String leaderIndex() { + return leaderIndex; + } + + private final String followerIndex; + + public String followerIndex() { + return followerIndex; + } + + private final int shardId; + + public int getShardId() { + return shardId; + } + + private final long leaderGlobalCheckpoint; + + public long leaderGlobalCheckpoint() { + return leaderGlobalCheckpoint; + } + + private final long leaderMaxSeqNo; + + public long leaderMaxSeqNo() { + return leaderMaxSeqNo; + } + + private final long followerGlobalCheckpoint; + + public long followerGlobalCheckpoint() { + return followerGlobalCheckpoint; + } + + private final long followerMaxSeqNo; + + public long followerMaxSeqNo() { + return followerMaxSeqNo; + } + + private final long lastRequestedSeqNo; + + public long lastRequestedSeqNo() { + return lastRequestedSeqNo; + } + + private final int numberOfConcurrentReads; + + public int numberOfConcurrentReads() { + return numberOfConcurrentReads; + } + + private final int numberOfConcurrentWrites; + + public int numberOfConcurrentWrites() { + return numberOfConcurrentWrites; + } + + private final int numberOfQueuedWrites; + + public int numberOfQueuedWrites() { + return numberOfQueuedWrites; + } + + private final long mappingVersion; + + public long mappingVersion() { + return mappingVersion; + } + + private final long totalFetchTimeMillis; + + public long totalFetchTimeMillis() { + return totalFetchTimeMillis; + } + + private final long numberOfSuccessfulFetches; + + public long numberOfSuccessfulFetches() { + return numberOfSuccessfulFetches; + } + + private final long numberOfFailedFetches; + + public long numberOfFailedFetches() { + return numberOfFailedFetches; + } + + private final long operationsReceived; + + public long operationsReceived() { + return operationsReceived; + } + + private final long totalTransferredBytes; + + public long totalTransferredBytes() { + return totalTransferredBytes; + } + + private final long totalIndexTimeMillis; + + public long totalIndexTimeMillis() { + return totalIndexTimeMillis; + } + + private final long numberOfSuccessfulBulkOperations; + + public long numberOfSuccessfulBulkOperations() { + return numberOfSuccessfulBulkOperations; + } + + private final long numberOfFailedBulkOperations; + + public long numberOfFailedBulkOperations() { + return numberOfFailedBulkOperations; + } + + private final long numberOfOperationsIndexed; + + public long numberOfOperationsIndexed() { + return numberOfOperationsIndexed; + } + + 
private final NavigableMap fetchExceptions; + + public NavigableMap fetchExceptions() { + return fetchExceptions; + } + + private final long timeSinceLastFetchMillis; + + public long timeSinceLastFetchMillis() { + return timeSinceLastFetchMillis; + } + + public ShardFollowNodeTaskStatus( + final String leaderIndex, + final String followerIndex, + final int shardId, + final long leaderGlobalCheckpoint, + final long leaderMaxSeqNo, + final long followerGlobalCheckpoint, + final long followerMaxSeqNo, + final long lastRequestedSeqNo, + final int numberOfConcurrentReads, + final int numberOfConcurrentWrites, + final int numberOfQueuedWrites, + final long mappingVersion, + final long totalFetchTimeMillis, + final long numberOfSuccessfulFetches, + final long numberOfFailedFetches, + final long operationsReceived, + final long totalTransferredBytes, + final long totalIndexTimeMillis, + final long numberOfSuccessfulBulkOperations, + final long numberOfFailedBulkOperations, + final long numberOfOperationsIndexed, + final NavigableMap fetchExceptions, + final long timeSinceLastFetchMillis) { + this.leaderIndex = leaderIndex; + this.followerIndex = followerIndex; + this.shardId = shardId; + this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; + this.leaderMaxSeqNo = leaderMaxSeqNo; + this.followerGlobalCheckpoint = followerGlobalCheckpoint; + this.followerMaxSeqNo = followerMaxSeqNo; + this.lastRequestedSeqNo = lastRequestedSeqNo; + this.numberOfConcurrentReads = numberOfConcurrentReads; + this.numberOfConcurrentWrites = numberOfConcurrentWrites; + this.numberOfQueuedWrites = numberOfQueuedWrites; + this.mappingVersion = mappingVersion; + this.totalFetchTimeMillis = totalFetchTimeMillis; + this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; + this.numberOfFailedFetches = numberOfFailedFetches; + this.operationsReceived = operationsReceived; + this.totalTransferredBytes = totalTransferredBytes; + this.totalIndexTimeMillis = totalIndexTimeMillis; + this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; + this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; + this.numberOfOperationsIndexed = numberOfOperationsIndexed; + this.fetchExceptions = Objects.requireNonNull(fetchExceptions); + this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; + } + + public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException { + this.leaderIndex = in.readString(); + this.followerIndex = in.readString(); + this.shardId = in.readVInt(); + this.leaderGlobalCheckpoint = in.readZLong(); + this.leaderMaxSeqNo = in.readZLong(); + this.followerGlobalCheckpoint = in.readZLong(); + this.followerMaxSeqNo = in.readZLong(); + this.lastRequestedSeqNo = in.readZLong(); + this.numberOfConcurrentReads = in.readVInt(); + this.numberOfConcurrentWrites = in.readVInt(); + this.numberOfQueuedWrites = in.readVInt(); + this.mappingVersion = in.readVLong(); + this.totalFetchTimeMillis = in.readVLong(); + this.numberOfSuccessfulFetches = in.readVLong(); + this.numberOfFailedFetches = in.readVLong(); + this.operationsReceived = in.readVLong(); + this.totalTransferredBytes = in.readVLong(); + this.totalIndexTimeMillis = in.readVLong(); + this.numberOfSuccessfulBulkOperations = in.readVLong(); + this.numberOfFailedBulkOperations = in.readVLong(); + this.numberOfOperationsIndexed = in.readVLong(); + this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); + this.timeSinceLastFetchMillis = in.readZLong(); + } + + @Override + public String 
getWriteableName() { + return STATUS_PARSER_NAME; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(leaderIndex); + out.writeString(followerIndex); + out.writeVInt(shardId); + out.writeZLong(leaderGlobalCheckpoint); + out.writeZLong(leaderMaxSeqNo); + out.writeZLong(followerGlobalCheckpoint); + out.writeZLong(followerMaxSeqNo); + out.writeZLong(lastRequestedSeqNo); + out.writeVInt(numberOfConcurrentReads); + out.writeVInt(numberOfConcurrentWrites); + out.writeVInt(numberOfQueuedWrites); + out.writeVLong(mappingVersion); + out.writeVLong(totalFetchTimeMillis); + out.writeVLong(numberOfSuccessfulFetches); + out.writeVLong(numberOfFailedFetches); + out.writeVLong(operationsReceived); + out.writeVLong(totalTransferredBytes); + out.writeVLong(totalIndexTimeMillis); + out.writeVLong(numberOfSuccessfulBulkOperations); + out.writeVLong(numberOfFailedBulkOperations); + out.writeVLong(numberOfOperationsIndexed); + out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); + out.writeZLong(timeSinceLastFetchMillis); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + toXContentFragment(builder, params); + } + builder.endObject(); + return builder; + } + + public XContentBuilder toXContentFragment(final XContentBuilder builder, final Params params) throws IOException { + builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); + builder.field(FOLLOWER_INDEX.getPreferredName(), followerIndex); + builder.field(SHARD_ID.getPreferredName(), shardId); + builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); + builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); + builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); + builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); + builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); + builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); + builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); + builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); + builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); + builder.humanReadableField( + TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), + "total_fetch_time", + new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); + builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); + builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); + builder.humanReadableField( + TOTAL_TRANSFERRED_BYTES.getPreferredName(), + "total_transferred", + new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); + builder.humanReadableField( + TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), + "total_index_time", + new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); + builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); + builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); + 
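+        // renders fetch_exceptions as an array of { "from_seq_no": ..., "exception": { ... } }
+        // objects, one entry per failed fetch, in ascending seq-no order (TreeMap-backed)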
builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry entry : fetchExceptions.entrySet()) { + builder.startObject(); + { + builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), + "time_since_last_fetch", + new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); + return builder; + } + + public static ShardFollowNodeTaskStatus fromXContent(final XContentParser parser) { + return STATUS_PARSER.apply(parser, null); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final ShardFollowNodeTaskStatus that = (ShardFollowNodeTaskStatus) o; + return leaderIndex.equals(that.leaderIndex) && + followerIndex.equals(that.followerIndex) && + shardId == that.shardId && + leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && + leaderMaxSeqNo == that.leaderMaxSeqNo && + followerGlobalCheckpoint == that.followerGlobalCheckpoint && + followerMaxSeqNo == that.followerMaxSeqNo && + lastRequestedSeqNo == that.lastRequestedSeqNo && + numberOfConcurrentReads == that.numberOfConcurrentReads && + numberOfConcurrentWrites == that.numberOfConcurrentWrites && + numberOfQueuedWrites == that.numberOfQueuedWrites && + mappingVersion == that.mappingVersion && + totalFetchTimeMillis == that.totalFetchTimeMillis && + numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && + numberOfFailedFetches == that.numberOfFailedFetches && + operationsReceived == that.operationsReceived && + totalTransferredBytes == that.totalTransferredBytes && + numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && + numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && + numberOfOperationsIndexed == that.numberOfOperationsIndexed && + /* + * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by + * keys. + */ + fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && + timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + followerIndex, + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. 
+ */ + fetchExceptions.keySet(), + getFetchExceptionMessages(this), + timeSinceLastFetchMillis); + } + + private static List getFetchExceptionMessages(final ShardFollowNodeTaskStatus status) { + return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); + } + + public String toString() { + return Strings.toString(this); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java similarity index 71% rename from x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index b5d6697fc73..863cb678d7e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; @@ -19,8 +19,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.io.IOException; import java.util.Collections; @@ -28,7 +28,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; -public class CcrStatsAction extends Action { +public class CcrStatsAction extends Action { public static final String NAME = "cluster:monitor/ccr/stats"; @@ -39,41 +39,45 @@ public class CcrStatsAction extends Action { } @Override - public TasksResponse newResponse() { - return new TasksResponse(); + public StatsResponses newResponse() { + return new StatsResponses(); } - public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { + public static class StatsResponses extends BaseTasksResponse implements ToXContentObject { - private final List taskResponses; + private final List statsResponse; - public TasksResponse() { + public List getStatsResponses() { + return statsResponse; + } + + public StatsResponses() { this(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); } - TasksResponse( + public StatsResponses( final List taskFailures, final List nodeFailures, - final List taskResponses) { + final List statsResponse) { super(taskFailures, nodeFailures); - this.taskResponses = taskResponses; + this.statsResponse = statsResponse; } @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { // sort by index name, then shard ID - final Map> taskResponsesByIndex = new TreeMap<>(); - for (final TaskResponse taskResponse : taskResponses) { + final Map> taskResponsesByIndex = new TreeMap<>(); + for (final StatsResponse statsResponse : statsResponse) { taskResponsesByIndex.computeIfAbsent( - taskResponse.followerShardId().getIndexName(), - k -> new TreeMap<>()).put(taskResponse.followerShardId().getId(), taskResponse); + 
statsResponse.status().followerIndex(), + k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse); } builder.startObject(); { - for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { + for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { builder.startArray(index.getKey()); { - for (final Map.Entry shard : index.getValue().entrySet()) { + for (final Map.Entry shard : index.getValue().entrySet()) { shard.getValue().status().toXContent(builder, params); } } @@ -85,7 +89,7 @@ public class CcrStatsAction extends Action { } } - public static class TasksRequest extends BaseTasksRequest implements IndicesRequest { + public static class StatsRequest extends BaseTasksRequest implements IndicesRequest { private String[] indices; @@ -143,33 +147,24 @@ public class CcrStatsAction extends Action { } - public static class TaskResponse implements Writeable { + public static class StatsResponse implements Writeable { - private final ShardId followerShardId; + private final ShardFollowNodeTaskStatus status; - ShardId followerShardId() { - return followerShardId; - } - - private final ShardFollowNodeTask.Status status; - - ShardFollowNodeTask.Status status() { + public ShardFollowNodeTaskStatus status() { return status; } - TaskResponse(final ShardId followerShardId, final ShardFollowNodeTask.Status status) { - this.followerShardId = followerShardId; + public StatsResponse(final ShardFollowNodeTaskStatus status) { this.status = status; } - TaskResponse(final StreamInput in) throws IOException { - this.followerShardId = ShardId.readShardId(in); - this.status = new ShardFollowNodeTask.Status(in); + public StatsResponse(final StreamInput in) throws IOException { + this.status = new ShardFollowNodeTaskStatus(in); } @Override public void writeTo(final StreamOutput out) throws IOException { - followerShardId.writeTo(out); status.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java new file mode 100644 index 00000000000..ea63815c2b9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public final class CreateAndFollowIndexAction extends Action { + + public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); + public static final String NAME = "indices:admin/xpack/ccr/create_and_follow_index"; + + private CreateAndFollowIndexAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements IndicesRequest { + + private FollowIndexAction.Request followRequest; + + public Request(FollowIndexAction.Request followRequest) { + this.followRequest = Objects.requireNonNull(followRequest); + } + + public Request() { + + } + + public FollowIndexAction.Request getFollowRequest() { + return followRequest; + } + + @Override + public ActionRequestValidationException validate() { + return followRequest.validate(); + } + + @Override + public String[] indices() { + return new String[]{followRequest.getFollowerIndex()}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followRequest = new FollowIndexAction.Request(); + followRequest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + followRequest.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(followRequest, request.followRequest); + } + + @Override + public int hashCode() { + return Objects.hash(followRequest); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private boolean followIndexCreated; + private boolean followIndexShardsAcked; + private boolean indexFollowingStarted; + + public Response() { + + } + + public Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { + this.followIndexCreated = followIndexCreated; + this.followIndexShardsAcked = followIndexShardsAcked; + this.indexFollowingStarted = indexFollowingStarted; + } + + public boolean isFollowIndexCreated() { + return followIndexCreated; + } + + public boolean isFollowIndexShardsAcked() { + return followIndexShardsAcked; + } + + public boolean isIndexFollowingStarted() { + return indexFollowingStarted; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followIndexCreated = in.readBoolean(); + followIndexShardsAcked = in.readBoolean(); + indexFollowingStarted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + 
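+        // the three flags below must stay in this order on the wire; readFrom above
+        // consumes them in the same order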
out.writeBoolean(followIndexCreated); + out.writeBoolean(followIndexShardsAcked); + out.writeBoolean(indexFollowingStarted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("follow_index_created", followIndexCreated); + builder.field("follow_index_shards_acked", followIndexShardsAcked); + builder.field("index_following_started", indexFollowingStarted); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return followIndexCreated == response.followIndexCreated && + followIndexShardsAcked == response.followIndexShardsAcked && + indexFollowingStarted == response.indexFollowingStarted; + } + + @Override + public int hashCode() { + return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); + } + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java similarity index 98% rename from x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java index 82e142202d2..6d49a370a34 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java new file mode 100644 index 00000000000..2c311356d49 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java @@ -0,0 +1,306 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public final class FollowIndexAction extends Action { + + public static final FollowIndexAction INSTANCE = new FollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; + + public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; + public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; + public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; + public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; + public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; + public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500); + public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); + + private FollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + private static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + private static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); + private static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); + private static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); + private static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + private static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + private static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); + private static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, + (args, followerIndex) -> { + if (args[1] != null) { + followerIndex = (String) args[1]; + } + return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], + (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); + }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES); + 
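+            // every tuning parameter is declared optional here; a null value simply falls
+            // through to the corresponding DEFAULT_* constant in the Request constructor below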
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), + MAX_RETRY_DELAY, + ObjectParser.ValueType.STRING); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), + IDLE_SHARD_RETRY_DELAY, + ObjectParser.ValueType.STRING); + } + + public static Request fromXContent(final XContentParser parser, final String followerIndex) throws IOException { + Request request = PARSER.parse(parser, followerIndex); + if (followerIndex != null) { + if (request.followerIndex == null) { + request.followerIndex = followerIndex; + } else { + if (request.followerIndex.equals(followerIndex) == false) { + throw new IllegalArgumentException("provided follower_index is not equal"); + } + } + } + return request; + } + + private String leaderIndex; + + public String getLeaderIndex() { + return leaderIndex; + } + + + private String followerIndex; + + public String getFollowerIndex() { + return followerIndex; + } + + private int maxBatchOperationCount; + + public int getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + private int maxConcurrentReadBatches; + + public int getMaxConcurrentReadBatches() { + return maxConcurrentReadBatches; + } + + private long maxOperationSizeInBytes; + + public long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + private int maxConcurrentWriteBatches; + + public int getMaxConcurrentWriteBatches() { + return maxConcurrentWriteBatches; + } + + private int maxWriteBufferSize; + + public int getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + private TimeValue maxRetryDelay; + + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; + } + + private TimeValue idleShardRetryDelay; + + public TimeValue getIdleShardRetryDelay() { + return idleShardRetryDelay; + } + + public Request( + final String leaderIndex, + final String followerIndex, + final Integer maxBatchOperationCount, + final Integer maxConcurrentReadBatches, + final Long maxOperationSizeInBytes, + final Integer maxConcurrentWriteBatches, + final Integer maxWriteBufferSize, + final TimeValue maxRetryDelay, + final TimeValue idleShardRetryDelay) { + + if (leaderIndex == null) { + throw new IllegalArgumentException(LEADER_INDEX_FIELD.getPreferredName() + " is missing"); + } + + if (followerIndex == null) { + throw new IllegalArgumentException(FOLLOWER_INDEX_FIELD.getPreferredName() + " is missing"); + } + + final int actualMaxBatchOperationCount = + maxBatchOperationCount == null ? DEFAULT_MAX_BATCH_OPERATION_COUNT : maxBatchOperationCount; + if (actualMaxBatchOperationCount < 1) { + throw new IllegalArgumentException(MAX_BATCH_OPERATION_COUNT.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxConcurrentReadBatches = + maxConcurrentReadBatches == null ? DEFAULT_MAX_CONCURRENT_READ_BATCHES : maxConcurrentReadBatches; + if (actualMaxConcurrentReadBatches < 1) { + throw new IllegalArgumentException(MAX_CONCURRENT_READ_BATCHES.getPreferredName() + " must be larger than 0"); + } + + final long actualMaxOperationSizeInBytes = + maxOperationSizeInBytes == null ? 
DEFAULT_MAX_BATCH_SIZE_IN_BYTES : maxOperationSizeInBytes; + if (actualMaxOperationSizeInBytes <= 0) { + throw new IllegalArgumentException(MAX_BATCH_SIZE_IN_BYTES.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxConcurrentWriteBatches = + maxConcurrentWriteBatches == null ? DEFAULT_MAX_CONCURRENT_WRITE_BATCHES : maxConcurrentWriteBatches; + if (actualMaxConcurrentWriteBatches < 1) { + throw new IllegalArgumentException(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxWriteBufferSize = maxWriteBufferSize == null ? DEFAULT_MAX_WRITE_BUFFER_SIZE : maxWriteBufferSize; + if (actualMaxWriteBufferSize < 1) { + throw new IllegalArgumentException(MAX_WRITE_BUFFER_SIZE.getPreferredName() + " must be larger than 0"); + } + + final TimeValue actualRetryTimeout = maxRetryDelay == null ? DEFAULT_RETRY_TIMEOUT : maxRetryDelay; + final TimeValue actualIdleShardRetryDelay = idleShardRetryDelay == null ? DEFAULT_IDLE_SHARD_RETRY_DELAY : idleShardRetryDelay; + + this.leaderIndex = leaderIndex; + this.followerIndex = followerIndex; + this.maxBatchOperationCount = actualMaxBatchOperationCount; + this.maxConcurrentReadBatches = actualMaxConcurrentReadBatches; + this.maxOperationSizeInBytes = actualMaxOperationSizeInBytes; + this.maxConcurrentWriteBatches = actualMaxConcurrentWriteBatches; + this.maxWriteBufferSize = actualMaxWriteBufferSize; + this.maxRetryDelay = actualRetryTimeout; + this.idleShardRetryDelay = actualIdleShardRetryDelay; + } + + public Request() { + + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + leaderIndex = in.readString(); + followerIndex = in.readString(); + maxBatchOperationCount = in.readVInt(); + maxConcurrentReadBatches = in.readVInt(); + maxOperationSizeInBytes = in.readVLong(); + maxConcurrentWriteBatches = in.readVInt(); + maxWriteBufferSize = in.readVInt(); + maxRetryDelay = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderIndex); + out.writeString(followerIndex); + out.writeVInt(maxBatchOperationCount); + out.writeVInt(maxConcurrentReadBatches); + out.writeVLong(maxOperationSizeInBytes); + out.writeVInt(maxConcurrentWriteBatches); + out.writeVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(maxRetryDelay); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); + builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + } + builder.endObject(); + return 
builder; + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return maxBatchOperationCount == request.maxBatchOperationCount && + maxConcurrentReadBatches == request.maxConcurrentReadBatches && + maxOperationSizeInBytes == request.maxOperationSizeInBytes && + maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && + maxWriteBufferSize == request.maxWriteBufferSize && + Objects.equals(maxRetryDelay, request.maxRetryDelay) && + Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && + Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(followerIndex, request.followerIndex); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + followerIndex, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + maxRetryDelay, + idleShardRetryDelay + ); + } + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java similarity index 86% rename from x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index a01fd8e3bc2..dc69795bb4a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; @@ -56,12 +56,12 @@ public class PutAutoFollowPatternAction extends Action { PARSER.declareLong(Request::setMaxOperationSizeInBytes, AutoFollowPattern.MAX_BATCH_SIZE_IN_BYTES); PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES); PARSER.declareInt(Request::setMaxWriteBufferSize, AutoFollowPattern.MAX_WRITE_BUFFER_SIZE); - PARSER.declareField(Request::setRetryTimeout, - (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.RETRY_TIMEOUT.getPreferredName()), - ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(Request::setMaxRetryDelay, + (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName()), + AutoFollowPattern.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(Request::setIdleShardRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName()), - ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + AutoFollowPattern.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); } public static Request fromXContent(XContentParser parser, String remoteClusterAlias) throws IOException { @@ -87,7 +87,7 @@ public class PutAutoFollowPatternAction extends Action { private Long maxOperationSizeInBytes; private Integer maxConcurrentWriteBatches; private Integer maxWriteBufferSize; - private TimeValue retryTimeout; + private TimeValue maxRetryDelay; private TimeValue idleShardRetryDelay; @Override @@ -166,12 +166,12 @@ public class PutAutoFollowPatternAction extends Action { this.maxWriteBufferSize = maxWriteBufferSize; } - public TimeValue getRetryTimeout() { - return retryTimeout; + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; } - public void setRetryTimeout(TimeValue retryTimeout) { - this.retryTimeout = retryTimeout; + public void setMaxRetryDelay(TimeValue maxRetryDelay) { + this.maxRetryDelay = maxRetryDelay; } public TimeValue getIdleShardRetryDelay() { @@ -193,7 +193,7 @@ public class PutAutoFollowPatternAction extends Action { maxOperationSizeInBytes = in.readOptionalLong(); maxConcurrentWriteBatches = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalVInt(); - retryTimeout = in.readOptionalTimeValue(); + maxRetryDelay = in.readOptionalTimeValue(); idleShardRetryDelay = in.readOptionalTimeValue(); } @@ -208,7 +208,7 @@ public class PutAutoFollowPatternAction extends Action { out.writeOptionalLong(maxOperationSizeInBytes); out.writeOptionalVInt(maxConcurrentWriteBatches); out.writeOptionalVInt(maxWriteBufferSize); - out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(idleShardRetryDelay); } @@ -222,25 +222,25 @@ public class PutAutoFollowPatternAction extends Action { builder.field(FOLLOW_INDEX_NAME_PATTERN_FIELD.getPreferredName(), followIndexNamePattern); } if (maxBatchOperationCount != null) { - builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(AutoFollowPattern.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); } if (maxOperationSizeInBytes != null) { - builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + 
builder.field(AutoFollowPattern.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); } if (maxWriteBufferSize != null) { - builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(AutoFollowPattern.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); } if (maxConcurrentReadBatches != null) { - builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(AutoFollowPattern.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); } if (maxConcurrentWriteBatches != null) { - builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + builder.field(AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); } - if (retryTimeout != null) { - builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + if (maxRetryDelay != null) { + builder.field(AutoFollowPattern.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); } if (idleShardRetryDelay != null) { - builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); } } builder.endObject(); @@ -260,7 +260,7 @@ public class PutAutoFollowPatternAction extends Action { Objects.equals(maxOperationSizeInBytes, request.maxOperationSizeInBytes) && Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && - Objects.equals(retryTimeout, request.retryTimeout) && + Objects.equals(maxRetryDelay, request.maxRetryDelay) && Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay); } @@ -275,7 +275,7 @@ public class PutAutoFollowPatternAction extends Action { maxOperationSizeInBytes, maxConcurrentWriteBatches, maxWriteBufferSize, - retryTimeout, + maxRetryDelay, idleShardRetryDelay ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java new file mode 100644 index 00000000000..65ecd3dad2f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class UnfollowIndexAction extends Action<AcknowledgedResponse> { + + public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; + + private UnfollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest { + + private String followIndex; + + public String getFollowIndex() { + return followIndex; + } + + public void setFollowIndex(final String followIndex) { + this.followIndex = followIndex; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + followIndex = in.readString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(followIndex); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java new file mode 100644 index 00000000000..3100dae9edf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.ccr.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; + +import java.util.Objects; + +public class CcrClient { + + private final ElasticsearchClient client; + + public CcrClient(final ElasticsearchClient client) { + this.client = Objects.requireNonNull(client, "client"); + } + + public void createAndFollow( + final CreateAndFollowIndexAction.Request request, + final ActionListener<CreateAndFollowIndexAction.Response> listener) { + client.execute(CreateAndFollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<CreateAndFollowIndexAction.Response> createAndFollow(final CreateAndFollowIndexAction.Request request) { + final PlainActionFuture<CreateAndFollowIndexAction.Response> listener = PlainActionFuture.newFuture(); + client.execute(CreateAndFollowIndexAction.INSTANCE, request, listener); + return listener; + } + + public void follow(final FollowIndexAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + client.execute(FollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> follow(final FollowIndexAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(FollowIndexAction.INSTANCE, request, listener); + return listener; + } + + public void stats( + final CcrStatsAction.StatsRequest request, + final ActionListener<CcrStatsAction.StatsResponses> listener) { + client.execute(CcrStatsAction.INSTANCE, request, listener); + } + + public ActionFuture<CcrStatsAction.StatsResponses> stats(final CcrStatsAction.StatsRequest request) { + final PlainActionFuture<CcrStatsAction.StatsResponses> listener = PlainActionFuture.newFuture(); + client.execute(CcrStatsAction.INSTANCE, request, listener); + return listener; + } + + public void unfollow(final UnfollowIndexAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + client.execute(UnfollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> unfollow(final UnfollowIndexAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(UnfollowIndexAction.INSTANCE, request, listener); + return listener; + } + + public void putAutoFollowPattern( + final PutAutoFollowPatternAction.Request request, + final ActionListener<AcknowledgedResponse> listener) { + client.execute(PutAutoFollowPatternAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> putAutoFollowPattern(final PutAutoFollowPatternAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(PutAutoFollowPatternAction.INSTANCE, request, listener); + return listener; + } + + public void deleteAutoFollowPattern( + final DeleteAutoFollowPatternAction.Request request, + final ActionListener<AcknowledgedResponse> listener) { + client.execute(DeleteAutoFollowPatternAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> deleteAutoFollowPattern(final DeleteAutoFollowPatternAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(DeleteAutoFollowPatternAction.INSTANCE,
request, listener); + return listener; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java index 5484f9f9902..dd508dfb36b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java @@ -92,7 +92,7 @@ public class FileStructure implements ToXContentObject, Writeable { static final ParseField STRUCTURE = new ParseField("format"); static final ParseField MULTILINE_START_PATTERN = new ParseField("multiline_start_pattern"); static final ParseField EXCLUDE_LINES_PATTERN = new ParseField("exclude_lines_pattern"); - static final ParseField INPUT_FIELDS = new ParseField("input_fields"); + static final ParseField COLUMN_NAMES = new ParseField("column_names"); static final ParseField HAS_HEADER_ROW = new ParseField("has_header_row"); static final ParseField DELIMITER = new ParseField("delimiter"); static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields"); @@ -115,7 +115,7 @@ public class FileStructure implements ToXContentObject, Writeable { PARSER.declareString((p, c) -> p.setFormat(Format.fromString(c)), STRUCTURE); PARSER.declareString(Builder::setMultilineStartPattern, MULTILINE_START_PATTERN); PARSER.declareString(Builder::setExcludeLinesPattern, EXCLUDE_LINES_PATTERN); - PARSER.declareStringArray(Builder::setInputFields, INPUT_FIELDS); + PARSER.declareStringArray(Builder::setColumnNames, COLUMN_NAMES); PARSER.declareBoolean(Builder::setHasHeaderRow, HAS_HEADER_ROW); PARSER.declareString((p, c) -> p.setDelimiter(c.charAt(0)), DELIMITER); PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS); @@ -142,7 +142,7 @@ public class FileStructure implements ToXContentObject, Writeable { private final Format format; private final String multilineStartPattern; private final String excludeLinesPattern; - private final List inputFields; + private final List columnNames; private final Boolean hasHeaderRow; private final Character delimiter; private final Boolean shouldTrimFields; @@ -155,7 +155,7 @@ public class FileStructure implements ToXContentObject, Writeable { private final List explanation; public FileStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker, - Format format, String multilineStartPattern, String excludeLinesPattern, List inputFields, + Format format, String multilineStartPattern, String excludeLinesPattern, List columnNames, Boolean hasHeaderRow, Character delimiter, Boolean shouldTrimFields, String grokPattern, String timestampField, List timestampFormats, boolean needClientTimezone, Map mappings, Map fieldStats, List explanation) { @@ -168,7 +168,7 @@ public class FileStructure implements ToXContentObject, Writeable { this.format = Objects.requireNonNull(format); this.multilineStartPattern = multilineStartPattern; this.excludeLinesPattern = excludeLinesPattern; - this.inputFields = (inputFields == null) ? null : Collections.unmodifiableList(new ArrayList<>(inputFields)); + this.columnNames = (columnNames == null) ? 
null : Collections.unmodifiableList(new ArrayList<>(columnNames)); this.hasHeaderRow = hasHeaderRow; this.delimiter = delimiter; this.shouldTrimFields = shouldTrimFields; @@ -190,7 +190,7 @@ public class FileStructure implements ToXContentObject, Writeable { format = in.readEnum(Format.class); multilineStartPattern = in.readOptionalString(); excludeLinesPattern = in.readOptionalString(); - inputFields = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; + columnNames = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; hasHeaderRow = in.readOptionalBoolean(); delimiter = in.readBoolean() ? (char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); @@ -213,11 +213,11 @@ public class FileStructure implements ToXContentObject, Writeable { out.writeEnum(format); out.writeOptionalString(multilineStartPattern); out.writeOptionalString(excludeLinesPattern); - if (inputFields == null) { + if (columnNames == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeCollection(inputFields, StreamOutput::writeString); + out.writeCollection(columnNames, StreamOutput::writeString); } out.writeOptionalBoolean(hasHeaderRow); if (delimiter == null) { @@ -273,8 +273,8 @@ public class FileStructure implements ToXContentObject, Writeable { return excludeLinesPattern; } - public List getInputFields() { - return inputFields; + public List getColumnNames() { + return columnNames; } public Boolean getHasHeaderRow() { @@ -335,8 +335,8 @@ public class FileStructure implements ToXContentObject, Writeable { if (excludeLinesPattern != null && excludeLinesPattern.isEmpty() == false) { builder.field(EXCLUDE_LINES_PATTERN.getPreferredName(), excludeLinesPattern); } - if (inputFields != null && inputFields.isEmpty() == false) { - builder.field(INPUT_FIELDS.getPreferredName(), inputFields); + if (columnNames != null && columnNames.isEmpty() == false) { + builder.field(COLUMN_NAMES.getPreferredName(), columnNames); } if (hasHeaderRow != null) { builder.field(HAS_HEADER_ROW.getPreferredName(), hasHeaderRow.booleanValue()); @@ -377,7 +377,7 @@ public class FileStructure implements ToXContentObject, Writeable { public int hashCode() { return Objects.hash(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, + multilineStartPattern, excludeLinesPattern, columnNames, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, fieldStats, explanation); } @@ -402,7 +402,7 @@ public class FileStructure implements ToXContentObject, Writeable { Objects.equals(this.format, that.format) && Objects.equals(this.multilineStartPattern, that.multilineStartPattern) && Objects.equals(this.excludeLinesPattern, that.excludeLinesPattern) && - Objects.equals(this.inputFields, that.inputFields) && + Objects.equals(this.columnNames, that.columnNames) && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) && Objects.equals(this.delimiter, that.delimiter) && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && @@ -424,7 +424,7 @@ public class FileStructure implements ToXContentObject, Writeable { private Format format; private String multilineStartPattern; private String excludeLinesPattern; - private List inputFields; + private List columnNames; private Boolean hasHeaderRow; private Character 
delimiter; private Boolean shouldTrimFields; @@ -484,8 +484,8 @@ public class FileStructure implements ToXContentObject, Writeable { return this; } - public Builder setInputFields(List inputFields) { - this.inputFields = inputFields; + public Builder setColumnNames(List columnNames) { + this.columnNames = columnNames; return this; } @@ -573,6 +573,9 @@ public class FileStructure implements ToXContentObject, Writeable { } // $FALL-THROUGH$ case XML: + if (columnNames != null) { + throw new IllegalArgumentException("Column names may not be specified for [" + format + "] structures."); + } if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } @@ -584,8 +587,8 @@ public class FileStructure implements ToXContentObject, Writeable { } break; case DELIMITED: - if (inputFields == null || inputFields.isEmpty()) { - throw new IllegalArgumentException("Input fields must be specified for [" + format + "] structures."); + if (columnNames == null || columnNames.isEmpty()) { + throw new IllegalArgumentException("Column names must be specified for [" + format + "] structures."); } if (hasHeaderRow == null) { throw new IllegalArgumentException("Has header row must be specified for [" + format + "] structures."); @@ -598,8 +601,8 @@ public class FileStructure implements ToXContentObject, Writeable { } break; case SEMI_STRUCTURED_TEXT: - if (inputFields != null) { - throw new IllegalArgumentException("Input fields may not be specified for [" + format + "] structures."); + if (columnNames != null) { + throw new IllegalArgumentException("Column names may not be specified for [" + format + "] structures."); } if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); @@ -635,7 +638,7 @@ public class FileStructure implements ToXContentObject, Writeable { } return new FileStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, + multilineStartPattern, excludeLinesPattern, columnNames, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, fieldStats, explanation); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java index 60b598a3a99..e0dc36b4117 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java @@ -110,7 +110,7 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper { @Override protected DirectoryReader wrap(DirectoryReader reader) { - if (licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + if (licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { return reader; } @@ -171,7 +171,7 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper { @Override protected IndexSearcher wrap(IndexSearcher searcher) throws EngineException { - if (licenseState.isSecurityEnabled() == false || 
licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + if (licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { return searcher; } diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es.json b/x-pack/plugin/core/src/main/resources/monitoring-es.json index a1726a7a74a..83c9fe70e11 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-es.json @@ -916,6 +916,86 @@ } } } + }, + "ccr_stats": { + "properties": { + "leader_index": { + "type": "keyword" + }, + "follower_index": { + "type": "keyword" + }, + "shard_id": { + "type": "integer" + }, + "leader_global_checkpoint": { + "type": "long" + }, + "leader_max_seq_no": { + "type": "long" + }, + "follower_global_checkpoint": { + "type": "long" + }, + "follower_max_seq_no": { + "type": "long" + }, + "last_requested_seq_no": { + "type": "long" + }, + "number_of_concurrent_reads": { + "type": "long" + }, + "number_of_concurrent_writes": { + "type": "long" + }, + "number_of_queued_writes": { + "type": "long" + }, + "mapping_version": { + "type": "long" + }, + "total_fetch_time_millis": { + "type": "long" + }, + "number_of_successful_fetches": { + "type": "long" + }, + "number_of_failed_fetches": { + "type": "long" + }, + "operations_received": { + "type": "long" + }, + "total_transferred_bytes": { + "type": "long" + }, + "total_index_time_millis": { + "type": "long" + }, + "number_of_successful_bulk_operations": { + "type": "long" + }, + "number_of_failed_bulk_operations": { + "type": "long" + }, + "number_of_operations_indexed": { + "type": "long" + }, + "fetch_exceptions": { + "properties": { + "from_seq_no": { + "type": "long" + }, + "exception": { + "type": "text" + } + } + }, + "time_since_last_fetch_millis": { + "type": "long" + } + } } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index c2cb5af1305..76b735dc78a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -92,13 +92,13 @@ public class XPackLicenseStateTests extends ESTestCase { assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); licenseState = new XPackLicenseState(Settings.EMPTY); - assertThat(licenseState.isAuthAllowed(), is(true)); - assertThat(licenseState.isIpFilteringAllowed(), is(true)); - assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isAuthAllowed(), is(false)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); } public void testSecurityBasic() { @@ -217,21 +217,21 @@ public class XPackLicenseStateTests extends ESTestCase { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); licenseState.update(TRIAL, true, 
VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)); - assertThat(licenseState.isSecurityEnabled(), is(false)); - assertThat(licenseState.isAuthAllowed(), is(true)); - assertThat(licenseState.isIpFilteringAllowed(), is(true)); - assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(true)); + assertThat(licenseState.isAuthAllowed(), is(false)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); - assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); - assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); - assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); } public void testOldTrialDefaultsSecurityOn() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4)); - assertThat(licenseState.isSecurityEnabled(), is(true)); + assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(false)); assertThat(licenseState.isAuthAllowed(), is(true)); assertThat(licenseState.isIpFilteringAllowed(), is(true)); assertThat(licenseState.isAuditingAllowed(), is(true)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java new file mode 100644 index 00000000000..6d3a17e3ebf --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -0,0 +1,291 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.snapshots; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MockEngineFactoryPlugin; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class SourceOnlySnapshotIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + Collection> classes = new ArrayList<>(super.nodePlugins()); + classes.add(MyPlugin.class); + return classes; + } + + @Override + protected Collection> getMockPlugins() { + Collection> classes = new ArrayList<>(super.getMockPlugins()); + classes.remove(MockEngineFactoryPlugin.class); + return classes; + } + + public static final class MyPlugin extends Plugin implements RepositoryPlugin, EnginePlugin { + @Override + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + return Collections.singletonMap("source", SourceOnlySnapshotRepository.newRepositoryFactory()); + } + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + if (indexSettings.getValue(SourceOnlySnapshotRepository.SOURCE_ONLY)) { + return Optional.of(SourceOnlySnapshotRepository.getEngineFactory()); + } + return Optional.empty(); + } + + @Override + public List> getSettings() { + 
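// Registering SOURCE_ONLY makes the marker setting legal on indices in the test cluster; the getEngineFactory override above can then switch any index carrying it to the read-only source-only engine. +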
List> settings = new ArrayList<>(super.getSettings()); + settings.add(SourceOnlySnapshotRepository.SOURCE_ONLY); + return settings; + } + } + + public void testSnapshotAndRestore() throws Exception { + final String sourceIdx = "test-idx"; + boolean requireRouting = randomBoolean(); + boolean useNested = randomBoolean(); + IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, useNested); + assertHits(sourceIdx, builders.length); + assertMappings(sourceIdx, requireRouting, useNested); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery() + .addIds("" + randomIntBetween(0, builders.length))).get(); + }); + assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); + + e = expectThrows(SearchPhaseExecutionException.class, () -> + client().prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")).get()); + assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); + // make sure deletes do not work + String idToDelete = "" + randomIntBetween(0, builders.length); + expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, "_doc", idToDelete) + .setRouting("r" + idToDelete).get()); + internalCluster().ensureAtLeastNumDataNodes(2); + client().admin().indices().prepareUpdateSettings(sourceIdx) + .setSettings(Settings.builder().put("index.number_of_replicas", 1)).get(); + ensureGreen(sourceIdx); + assertHits(sourceIdx, builders.length); + } + + public void testSnapshotAndRestoreWithNested() throws Exception { + final String sourceIdx = "test-idx"; + boolean requireRouting = randomBoolean(); + IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, true); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().clear().setDocs(true).get(); + assertThat(indicesStatsResponse.getTotal().docs.getDeleted(), Matchers.greaterThan(0L)); + assertHits(sourceIdx, builders.length); + assertMappings(sourceIdx, requireRouting, true); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> + client().prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))).get()); + assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); + e = expectThrows(SearchPhaseExecutionException.class, () -> + client().prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")).get()); + assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); + // make sure deletes do not work + String idToDelete = "" + randomIntBetween(0, builders.length); + expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, "_doc", idToDelete) + .setRouting("r" + idToDelete).get()); + internalCluster().ensureAtLeastNumDataNodes(2); + client().admin().indices().prepareUpdateSettings(sourceIdx).setSettings(Settings.builder().put("index.number_of_replicas", 1)) + .get(); + ensureGreen(sourceIdx); + assertHits(sourceIdx, builders.length); + } + + private void assertMappings(String sourceIdx, boolean requireRouting, boolean useNested) throws IOException { + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(sourceIdx).get(); + ImmutableOpenMap mapping = getMappingsResponse + .getMappings().get(sourceIdx); + 
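// A source-only restore rewrites the mapping: top-level indexing is disabled (enabled: false) and the original mapping is preserved under _meta, which is the shape the assertions below expect. +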
assertTrue(mapping.containsKey("_doc")); + String nested = useNested ? + ",\"incorrect\":{\"type\":\"object\"},\"nested\":{\"type\":\"nested\",\"properties\":{\"value\":{\"type\":\"long\"}}}" : ""; + if (requireRouting) { + assertEquals("{\"_doc\":{\"enabled\":false," + + "\"_meta\":{\"_doc\":{\"_routing\":{\"required\":true}," + + "\"properties\":{\"field1\":{\"type\":\"text\"," + + "\"fields\":{\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}}" + nested + + "}}}}}", mapping.get("_doc").source().string()); + } else { + assertEquals("{\"_doc\":{\"enabled\":false," + + "\"_meta\":{\"_doc\":{\"properties\":{\"field1\":{\"type\":\"text\"," + + "\"fields\":{\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}}" + nested + "}}}}}", + mapping.get("_doc").source().string()); + } + } + + private void assertHits(String index, int numDocsExpected) { + SearchResponse searchResponse = client().prepareSearch(index) + .addSort(SeqNoFieldMapper.NAME, SortOrder.ASC) + .setSize(numDocsExpected).get(); + Consumer assertConsumer = res -> { + SearchHits hits = res.getHits(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().clear().setDocs(true).get(); + long deleted = indicesStatsResponse.getTotal().docs.getDeleted(); + boolean allowHoles = deleted > 0; // we use indexRandom which might create holes ie. deleted docs + long i = 0; + for (SearchHit hit : hits) { + String id = hit.getId(); + Map sourceAsMap = hit.getSourceAsMap(); + assertTrue(sourceAsMap.containsKey("field1")); + if (allowHoles) { + long seqId = ((Number) hit.getSortValues()[0]).longValue(); + assertThat(i, Matchers.lessThanOrEqualTo(seqId)); + i = seqId + 1; + } else { + assertEquals(i++, hit.getSortValues()[0]); + } + assertEquals("bar " + id, sourceAsMap.get("field1")); + assertEquals("r" + id, hit.field("_routing").getValue()); + } + }; + assertConsumer.accept(searchResponse); + assertEquals(numDocsExpected, searchResponse.getHits().totalHits); + searchResponse = client().prepareSearch(index) + .addSort(SeqNoFieldMapper.NAME, SortOrder.ASC) + .setScroll("1m") + .slice(new SliceBuilder(SeqNoFieldMapper.NAME, randomIntBetween(0,1), 2)) + .setSize(randomIntBetween(1, 10)).get(); + do { + // now do a scroll with a slice + assertConsumer.accept(searchResponse); + searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); + } while (searchResponse.getHits().getHits().length > 0); + + } + + private IndexRequestBuilder[] snashotAndRestore(String sourceIdx, int numShards, boolean minimal, boolean requireRouting, boolean + useNested) + throws ExecutionException, InterruptedException, IOException { + logger.info("--> starting a master node and a data node"); + internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + + final Client client = client(); + final String repo = "test-repo"; + final String snapshot = "test-snap"; + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository(repo).setType("source") + .setSettings(Settings.builder().put("location", randomRepoPath()) + .put("delegate_type", "fs") + .put("restore_minimal", minimal) + .put("compress", randomBoolean()))); + + CreateIndexRequestBuilder createIndexRequestBuilder = prepareCreate(sourceIdx, 0, Settings.builder() + .put("number_of_shards", numShards).put("number_of_replicas", 0)); + List mappings = new ArrayList<>(); + if (requireRouting) { + mappings.addAll(Arrays.asList("_routing", "required=true")); + 
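// addMapping(type, Object...) treats these flat entries as alternating field-name/definition pairs, so "_routing", "required=true" marks routing as mandatory for the test index. +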
} + + if (useNested) { + mappings.addAll(Arrays.asList("nested", "type=nested", "incorrect", "type=object")); + } + if (mappings.isEmpty() == false) { + createIndexRequestBuilder.addMapping("_doc", mappings.toArray()); + } + assertAcked(createIndexRequestBuilder); + ensureGreen(); + + logger.info("--> indexing some data"); + IndexRequestBuilder[] builders = new IndexRequestBuilder[randomIntBetween(10, 100)]; + for (int i = 0; i < builders.length; i++) { + XContentBuilder source = jsonBuilder() + .startObject() + .field("field1", "bar " + i); + if (useNested) { + source.startArray("nested"); + for (int j = 0; j < 2; ++j) { + source = source.startObject().field("value", i + 1 + j).endObject(); + } + source.endArray(); + } + source.endObject(); + builders[i] = client().prepareIndex(sourceIdx, "_doc", + Integer.toString(i)).setSource(source).setRouting("r" + i); + } + indexRandom(true, builders); + flushAndRefresh(); + assertHitCount(client().prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("0")).get(), 1); + + logger.info("--> snapshot the index"); + CreateSnapshotResponse createResponse = client.admin().cluster() + .prepareCreateSnapshot(repo, snapshot) + .setWaitForCompletion(true).setIndices(sourceIdx).get(); + assertEquals(SnapshotState.SUCCESS, createResponse.getSnapshotInfo().state()); + + logger.info("--> delete index and stop the data node"); + assertAcked(client.admin().indices().prepareDelete(sourceIdx).get()); + internalCluster().stopRandomDataNode(); + client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("1"); + + logger.info("--> start a new data node"); + final Settings dataSettings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLength(5)) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) // to get a new node id + .build(); + internalCluster().startDataOnlyNode(dataSettings); + client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("2"); + + logger.info("--> restore the index and ensure all shards are allocated"); + RestoreSnapshotResponse restoreResponse = client().admin().cluster() + .prepareRestoreSnapshot(repo, snapshot).setWaitForCompletion(true) + .setIndices(sourceIdx).get(); + assertEquals(restoreResponse.getRestoreInfo().totalShards(), + restoreResponse.getRestoreInfo().successfulShards()); + ensureYellow(); + return builders; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java new file mode 100644 index 00000000000..7058724ecf0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -0,0 +1,358 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.snapshots; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.Bits; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.threadpool.ThreadPool; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.index.mapper.SourceToParse.source; + +public class SourceOnlySnapshotShardTests extends IndexShardTestCase { + + public void testSourceIncomplete() throws IOException { + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), randomAlphaOfLength(10), true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData 
metaData = IndexMetaData.builder(shardRouting.getIndexName()) + .settings(settings) + .primaryTerm(0, primaryTerm) + .putMapping("_doc", + "{\"_source\":{\"enabled\": false}}").build(); + IndexShard shard = newShard(shardRouting, metaData, new InternalEngineFactory()); + recoverShardFromStore(shard); + + for (int i = 0; i < 1; i++) { + final String id = Integer.toString(i); + indexDoc(shard, "_doc", id); + } + SnapshotId snapshotId = new SnapshotId("test", "test"); + IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID()); + SourceOnlySnapshotRepository repository = new SourceOnlySnapshotRepository(createRepository()); + repository.start(); + try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> + runAsSnapshot(shard.getThreadPool(), + () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); + assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" + , illegalStateException.getMessage()); + } + closeShards(shard); + } + + public void testIncrementalSnapshot() throws IOException { + IndexShard shard = newStartedShard(); + for (int i = 0; i < 10; i++) { + final String id = Integer.toString(i); + indexDoc(shard, "_doc", id); + } + + IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID()); + SourceOnlySnapshotRepository repository = new SourceOnlySnapshotRepository(createRepository()); + repository.start(); + int totalFileCount = -1; + try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + SnapshotId snapshotId = new SnapshotId("test", "test"); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef + .getIndexCommit(), indexShardSnapshotStatus)); + IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); + assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); + totalFileCount = copy.getTotalFileCount(); + assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE); + } + + indexDoc(shard, "_doc", Integer.toString(10)); + indexDoc(shard, "_doc", Integer.toString(11)); + try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { + SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); + + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef + .getIndexCommit(), indexShardSnapshotStatus)); + IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); + // we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt + assertEquals(5, copy.getIncrementalFileCount()); + // in total we have 4 more files than the previous snap since we don't count the segments_N twice + assertEquals(totalFileCount+4, copy.getTotalFileCount()); + assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE); + } + deleteDoc(shard, "_doc", Integer.toString(10)); + try (Engine.IndexCommitRef snapshotRef = 
shard.acquireLastIndexCommit(true)) { + SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); + + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef + .getIndexCommit(), indexShardSnapshotStatus)); + IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); + // we processed the segments_N file plus _1_1.liv + assertEquals(2, copy.getIncrementalFileCount()); + // in total we have 5 more files than the previous snap since we don't count the segments_N twice + assertEquals(totalFileCount+5, copy.getTotalFileCount()); + assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE); + } + closeShards(shard); + } + + private String randomDoc() { + return "{ \"value\" : \"" + randomAlphaOfLength(10) + "\"}"; + } + + public void testRestoreMinmal() throws IOException { + IndexShard shard = newStartedShard(true); + int numInitialDocs = randomIntBetween(10, 100); + for (int i = 0; i < numInitialDocs; i++) { + final String id = Integer.toString(i); + indexDoc(shard, "_doc", id, randomDoc()); + if (randomBoolean()) { + shard.refresh("test"); + } + } + for (int i = 0; i < numInitialDocs; i++) { + final String id = Integer.toString(i); + if (randomBoolean()) { + if (rarely()) { + deleteDoc(shard, "_doc", id); + } else { + indexDoc(shard, "_doc", id, randomDoc()); + } + } + if (frequently()) { + shard.refresh("test"); + } + } + SnapshotId snapshotId = new SnapshotId("test", "test"); + IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID()); + SourceOnlySnapshotRepository repository = new SourceOnlySnapshotRepository(createRepository()); + repository.start(); + try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { + IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + runAsSnapshot(shard.getThreadPool(), () -> { + repository.initializeSnapshot(snapshotId, Arrays.asList(indexId), + MetaData.builder().put(shard.indexSettings() + .getIndexMetaData(), false).build()); + repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus); + }); + IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); + assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); + assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE); + } + shard.refresh("test"); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), randomAlphaOfLength(10), true, + ShardRoutingState.INITIALIZING, + new RecoverySource.SnapshotRecoverySource(new Snapshot("src_only", snapshotId), Version.CURRENT, indexId.getId())); + IndexMetaData metaData = runAsSnapshot(threadPool, () -> repository.getSnapshotIndexMetaData(snapshotId, indexId)); + IndexShard restoredShard = newShard(shardRouting, metaData, null, SourceOnlySnapshotRepository.getEngineFactory(), () -> {}); + restoredShard.mapperService().merge(shard.indexSettings().getIndexMetaData(), MapperService.MergeReason.MAPPING_RECOVERY); + DiscoveryNode discoveryNode = new DiscoveryNode("node_g", buildNewFakeTransportAddress(), Version.CURRENT); + restoredShard.markAsRecovering("test from snap", new RecoveryState(restoredShard.routingEntry(), discoveryNode, null)); + runAsSnapshot(shard.getThreadPool(), () -> + 
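// the restore goes through repository code that expects to run on a SNAPSHOT thread, hence the runAsSnapshot wrapper around the call below +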
assertTrue(restoredShard.restoreFromRepository(repository))); + assertEquals(restoredShard.recoveryState().getStage(), RecoveryState.Stage.DONE); + assertEquals(restoredShard.recoveryState().getTranslog().recoveredOperations(), 0); + assertEquals(IndexShardState.POST_RECOVERY, restoredShard.state()); + restoredShard.refresh("test"); + assertEquals(restoredShard.docStats().getCount(), shard.docStats().getCount()); + EngineException engineException = expectThrows(EngineException.class, () -> restoredShard.get( + new Engine.Get(false, false, "_doc", Integer.toString(0), new Term("_id", Uid.encodeId(Integer.toString(0)))))); + assertEquals(engineException.getCause().getMessage(), "_source only indices can't be searched or filtered"); + SeqNoStats seqNoStats = restoredShard.seqNoStats(); + assertEquals(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint()); + final IndexShard targetShard; + try (Engine.Searcher searcher = restoredShard.acquireSearcher("test")) { + assertEquals(searcher.reader().maxDoc(), seqNoStats.getLocalCheckpoint()); + TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + assertEquals(searcher.reader().numDocs(), search.totalHits.value); + search = searcher.searcher().search(new MatchAllDocsQuery(), Integer.MAX_VALUE, + new Sort(new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG)), false); + assertEquals(searcher.reader().numDocs(), search.totalHits.value); + long previous = -1; + for (ScoreDoc doc : search.scoreDocs) { + FieldDoc fieldDoc = (FieldDoc) doc; + assertEquals(1, fieldDoc.fields.length); + long current = (Long)fieldDoc.fields[0]; + assertThat(previous, Matchers.lessThan(current)); + previous = current; + } + expectThrows(UnsupportedOperationException.class, () -> searcher.searcher().search(new TermQuery(new Term("boom", "boom")), 1)); + targetShard = reindex(searcher.getDirectoryReader(), new MappingMetaData("_doc", + restoredShard.mapperService().documentMapper("_doc").meta())); + } + + for (int i = 0; i < numInitialDocs; i++) { + Engine.Get get = new Engine.Get(false, false, "_doc", Integer.toString(i), new Term("_id", Uid.encodeId(Integer.toString(i)))); + Engine.GetResult original = shard.get(get); + Engine.GetResult restored = targetShard.get(get); + assertEquals(original.exists(), restored.exists()); + + if (original.exists()) { + Document document = original.docIdAndVersion().reader.document(original.docIdAndVersion().docId); + Document restoredDocument = restored.docIdAndVersion().reader.document(restored.docIdAndVersion().docId); + for (IndexableField field : document) { + assertEquals(document.get(field.name()), restoredDocument.get(field.name())); + } + } + IOUtils.close(original, restored); + } + + closeShards(shard, restoredShard, targetShard); + } + + public IndexShard reindex(DirectoryReader reader, MappingMetaData mapping) throws IOException { + ShardRouting targetShardRouting = TestShardRouting.newShardRouting(new ShardId("target", "_na_", 0), randomAlphaOfLength(10), true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData.Builder metaData = IndexMetaData.builder(targetShardRouting.getIndexName()) + .settings(settings) + .primaryTerm(0, primaryTerm); + metaData.putMapping(mapping); + IndexShard targetShard = newShard(targetShardRouting, 
metaData.build(), new InternalEngineFactory()); + boolean success = false; + try { + recoverShardFromStore(targetShard); + String index = targetShard.shardId().getIndexName(); + FieldsVisitor rootFieldsVisitor = new FieldsVisitor(true); + for (LeafReaderContext ctx : reader.leaves()) { + LeafReader leafReader = ctx.reader(); + Bits liveDocs = leafReader.getLiveDocs(); + for (int i = 0; i < leafReader.maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + rootFieldsVisitor.reset(); + leafReader.document(i, rootFieldsVisitor); + rootFieldsVisitor.postProcess(targetShard.mapperService()); + Uid uid = rootFieldsVisitor.uid(); + BytesReference source = rootFieldsVisitor.source(); + assert source != null : "_source is null but should have been filtered out at snapshot time"; + Engine.Result result = targetShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, source + (index, uid.type(), uid.id(), source, XContentHelper.xContentType(source)) + .routing(rootFieldsVisitor.routing()), 1, false); + if (result.getResultType() != Engine.Result.Type.SUCCESS) { + throw new IllegalStateException("failed applying post restore operation result: " + result + .getResultType(), result.getFailure()); + } + } + } + } + targetShard.refresh("test"); + success = true; + } finally { + if (success == false) { + closeShards(targetShard); + } + } + return targetShard; + } + + + /** Create a {@link Environment} with random path.home and path.repo **/ + private Environment createEnvironment() { + Path home = createTempDir(); + return TestEnvironment.newEnvironment(Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath()) + .build()); + } + + /** Create a {@link Repository} with a random name **/ + private Repository createRepository() throws IOException { + Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); + RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings); + return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()); + } + + private static void runAsSnapshot(ThreadPool pool, Runnable runnable) { + runAsSnapshot(pool, (Callable<Void>) () -> { + runnable.run(); + return null; + }); + } + + private static <T> T runAsSnapshot(ThreadPool pool, Callable<T> runnable) { + PlainActionFuture<T> future = new PlainActionFuture<>(); + pool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + future.onResponse(runnable.call()); + } catch (Exception e) { + future.onFailure(e); + } + }); + try { + return future.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof Exception) { + throw ExceptionsHelper.convertToRuntime((Exception) e.getCause()); + } else { + throw new AssertionError(e.getCause()); + } + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java new file mode 100644 index 00000000000..e7d731739de --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java @@ -0,0 +1,245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.snapshots; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterMergePolicy; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.index.StandardDirectoryReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.List; + +public class SourceOnlySnapshotTests extends ESTestCase { + public void testSourceOnlyRandom() throws IOException { + try (Directory dir = newDirectory(); Directory targetDir = newDirectory()) { + SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); + IndexWriterConfig indexWriterConfig = newIndexWriterConfig().setIndexDeletionPolicy + (deletionPolicy).setSoftDeletesField(random().nextBoolean() ? null : Lucene.SOFT_DELETES_FIELD); + try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexWriterConfig, false)) { + final String softDeletesField = writer.w.getConfig().getSoftDeletesField(); + // we either use the soft deletes directly or manually delete them to test the additional delete functionality + boolean modifyDeletedDocs = softDeletesField != null && randomBoolean(); + SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir, + modifyDeletedDocs ? () -> new DocValuesFieldExistsQuery(softDeletesField) : null) { + @Override + DirectoryReader wrapReader(DirectoryReader reader) throws IOException { + return modifyDeletedDocs ? reader : super.wrapReader(reader); + } + }; + writer.commit(); + int numDocs = scaledRandomIntBetween(100, 10000); + boolean appendOnly = randomBoolean(); + for (int i = 0; i < numDocs; i++) { + int docId = appendOnly ? 
i : randomIntBetween(0, 100); + Document d = newRandomDocument(docId); + if (appendOnly) { + writer.addDocument(d); + } else { + writer.updateDocument(new Term("id", Integer.toString(docId)), d); + } + if (rarely()) { + if (randomBoolean()) { + writer.commit(); + } + IndexCommit snapshot = deletionPolicy.snapshot(); + try { + snapshoter.syncSnapshot(snapshot); + } finally { + deletionPolicy.release(snapshot); + } + } + } + if (randomBoolean()) { + writer.commit(); + } + IndexCommit snapshot = deletionPolicy.snapshot(); + try { + snapshoter.syncSnapshot(snapshot); + try (DirectoryReader snapReader = snapshoter.wrapReader(DirectoryReader.open(targetDir)); + DirectoryReader wrappedReader = snapshoter.wrapReader(DirectoryReader.open(snapshot))) { + DirectoryReader reader = modifyDeletedDocs + ? new SoftDeletesDirectoryReaderWrapper(wrappedReader, softDeletesField) : wrappedReader; + assertEquals(snapReader.maxDoc(), reader.maxDoc()); + assertEquals(snapReader.numDocs(), reader.numDocs()); + for (int i = 0; i < snapReader.maxDoc(); i++) { + assertEquals(snapReader.document(i).get("_source"), reader.document(i).get("_source")); + } + for (LeafReaderContext ctx : snapReader.leaves()) { + if (ctx.reader() instanceof SegmentReader) { + assertNull(((SegmentReader) ctx.reader()).getSegmentInfo().info.getIndexSort()); + } + } + } + } finally { + deletionPolicy.release(snapshot); + } + } + } + } + + private Document newRandomDocument(int id) { + Document doc = new Document(); + doc.add(new StringField("id", Integer.toString(id), Field.Store.YES)); + doc.add(new NumericDocValuesField("id", id)); + if (randomBoolean()) { + doc.add(new TextField("text", "the quick brown fox", Field.Store.NO)); + } + if (randomBoolean()) { + doc.add(new FloatPoint("float_point", 1.3f, 3.4f)); + } + if (randomBoolean()) { + doc.add(new NumericDocValuesField("some_value", randomLong())); + } + doc.add(new StoredField("_source", randomRealisticUnicodeOfCodepointLengthBetween(5, 10))); + return doc; + } + + public void testSrcOnlySnap() throws IOException { + try (Directory dir = newDirectory()) { + SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig() + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setIndexDeletionPolicy(deletionPolicy).setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) { + @Override + public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, MergeContext mergeContext) { + return randomBoolean(); + } + })); + Document doc = new Document(); + doc.add(new StringField("id", "1", Field.Store.YES)); + doc.add(new TextField("text", "the quick brown fox", Field.Store.NO)); + doc.add(new NumericDocValuesField("rank", 1)); + doc.add(new StoredField("src", "the quick brown fox")); + writer.addDocument(doc); + doc = new Document(); + doc.add(new StringField("id", "2", Field.Store.YES)); + doc.add(new TextField("text", "the quick blue fox", Field.Store.NO)); + doc.add(new NumericDocValuesField("rank", 2)); + doc.add(new StoredField("src", "the quick blue fox")); + doc.add(new StoredField("dummy", "foo")); // add a field only this segment has + writer.addDocument(doc); + writer.flush(); + doc = new Document(); + doc.add(new StringField("id", "1", Field.Store.YES)); + doc.add(new TextField("text", "the quick brown fox", Field.Store.NO)); + doc.add(new NumericDocValuesField("rank", 3)); + doc.add(new StoredField("src", "the quick brown fox")); + 
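// Note on the soft-delete mechanics exercised next: softUpdateDocument adds the new version of the document and, in the same atomic operation, marks the matching old version as deleted by attaching the given doc-values field (here the soft-deletes field) to it instead of issuing a hard delete; the old version physically stays in the segment and is only hidden by readers, such as SoftDeletesDirectoryReaderWrapper, that honor that field.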
writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField(Lucene.SOFT_DELETES_FIELD, 1)); + writer.commit(); + Directory targetDir = newDirectory(); + IndexCommit snapshot = deletionPolicy.snapshot(); + SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir); + snapshoter.syncSnapshot(snapshot); + + StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(snapshot); + try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) { + assertEquals(snapReader.maxDoc(), 3); + assertEquals(snapReader.numDocs(), 2); + for (int i = 0; i < 3; i++) { + assertEquals(snapReader.document(i).get("src"), reader.document(i).get("src")); + } + IndexSearcher searcher = new IndexSearcher(snapReader); + TopDocs id = searcher.search(new TermQuery(new Term("id", "1")), 10); + assertEquals(0, id.totalHits.value); + } + + snapshoter = new SourceOnlySnapshot(targetDir); + List<String> createdFiles = snapshoter.syncSnapshot(snapshot); + assertEquals(0, createdFiles.size()); + deletionPolicy.release(snapshot); + // now add another doc + doc = new Document(); + doc.add(new StringField("id", "4", Field.Store.YES)); + doc.add(new TextField("text", "the quick blue fox", Field.Store.NO)); + doc.add(new NumericDocValuesField("rank", 2)); + doc.add(new StoredField("src", "the quick blue fox")); + writer.addDocument(doc); + doc = new Document(); + doc.add(new StringField("id", "5", Field.Store.YES)); + doc.add(new TextField("text", "the quick blue fox", Field.Store.NO)); + doc.add(new NumericDocValuesField("rank", 2)); + doc.add(new StoredField("src", "the quick blue fox")); + writer.addDocument(doc); + writer.commit(); + { + snapshot = deletionPolicy.snapshot(); + snapshoter = new SourceOnlySnapshot(targetDir); + createdFiles = snapshoter.syncSnapshot(snapshot); + assertEquals(4, createdFiles.size()); + for (String file : createdFiles) { + String extension = IndexFileNames.getExtension(file); + switch (extension) { + case "fdt": + case "fdx": + case "fnm": + case "si": + break; + default: + fail("unexpected extension: " + extension); + } + } + try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) { + assertEquals(snapReader.maxDoc(), 5); + assertEquals(snapReader.numDocs(), 4); + } + deletionPolicy.release(snapshot); + } + writer.deleteDocuments(new Term("id", "5")); + writer.commit(); + { + snapshot = deletionPolicy.snapshot(); + snapshoter = new SourceOnlySnapshot(targetDir); + createdFiles = snapshoter.syncSnapshot(snapshot); + assertEquals(1, createdFiles.size()); + for (String file : createdFiles) { + String extension = IndexFileNames.getExtension(file); + switch (extension) { + case "liv": + break; + default: + fail("unexpected extension: " + extension); + } + } + try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) { + assertEquals(snapReader.maxDoc(), 5); + assertEquals(snapReader.numDocs(), 3); + } + deletionPolicy.release(snapshot); + } + writer.close(); + targetDir.close(); + reader.close(); + } + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternRequestTests.java similarity index 94% rename from x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternRequestTests.java index 0ca1b3d1278..135e699bb35 100644 ---
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternRequestTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.test.AbstractStreamableTestCase; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternRequestTests.java similarity index 95% rename from x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternRequestTests.java index 27760578db9..f11e1885e80 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternRequestTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -41,7 +41,7 @@ public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContent request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - request.setRetryTimeout(TimeValue.timeValueMillis(500)); + request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java index 6dcf6751965..e09b9e3f91e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java @@ -50,18 +50,17 @@ public class FileStructureTests extends AbstractSerializingTestCase queryShardContext, bitsetFilterCache, threadContext, licenseState, scriptService) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index e364b0a7e8a..207c9d22198 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -131,7 +131,6 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase { ShardId shardId = new ShardId(index, 0); licenseState = mock(XPackLicenseState.class); - 
when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); threadContext = new ThreadContext(Settings.EMPTY); IndexShard indexShard = mock(IndexShard.class); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 7a7deac0136..60b8235ec84 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -77,7 +77,7 @@ public class TransportPutDatafeedAction extends TransportMasterNodeAction listener) { // If security is enabled only create the datafeed if the user requesting creation has // permission to read the indices the datafeed is going to read from - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed()) { final String username = securityContext.getUser().principal(); ActionListener<HasPrivilegesResponse> privResponseListener = ActionListener.wrap( r -> handlePrivsResponse(username, request, r, listener), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index 625858c867a..ba6b590dfc8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -49,10 +49,12 @@ public class DelimitedFileStructureFinder implements FileStructureFinder { Tuple<Boolean, String[]> headerInfo = findHeaderFromSample(explanation, rows); boolean isHeaderInFile = headerInfo.v1(); String[] header = headerInfo.v2(); - String[] headerWithNamedBlanks = new String[header.length]; + // The column names are the header names but with blanks named column1, column2, etc. + String[] columnNames = new String[header.length]; for (int i = 0; i < header.length; ++i) { - String rawHeader = header[i].isEmpty() ? "column" + (i + 1) : header[i]; - headerWithNamedBlanks[i] = trimFields ? rawHeader.trim() : rawHeader; + assert header[i] != null; + String rawHeader = trimFields ? header[i].trim() : header[i]; + columnNames[i] = rawHeader.isEmpty() ? "column" + (i + 1) : rawHeader; } List<String> sampleLines = Arrays.asList(sample.split("\n")); @@ -63,7 +65,7 @@ public class DelimitedFileStructureFinder implements FileStructureFinder { List<String> row = rows.get(index); int lineNumber = lineNumbers.get(index); Map<String, String> sampleRecord = new LinkedHashMap<>(); - Util.filterListToMap(sampleRecord, headerWithNamedBlanks, + Util.filterListToMap(sampleRecord, columnNames, trimFields ?
row.stream().map(String::trim).collect(Collectors.toList()) : row); sampleRecords.add(sampleRecord); sampleMessages.add( @@ -82,7 +84,7 @@ public class DelimitedFileStructureFinder implements FileStructureFinder { .setNumMessagesAnalyzed(sampleRecords.size()) .setHasHeaderRow(isHeaderInFile) .setDelimiter(delimiter) - .setInputFields(Arrays.stream(headerWithNamedBlanks).collect(Collectors.toList())); + .setColumnNames(Arrays.stream(columnNames).collect(Collectors.toList())); if (trimFields) { structureBuilder.setShouldTrimFields(true); @@ -225,7 +227,9 @@ public class DelimitedFileStructureFinder implements FileStructureFinder { // SuperCSV will put nulls in the header if any columns don't have names, but empty strings are better for us return new Tuple<>(true, firstRow.stream().map(field -> (field == null) ? "" : field).toArray(String[]::new)); } else { - return new Tuple<>(false, IntStream.rangeClosed(1, firstRow.size()).mapToObj(num -> "column" + num).toArray(String[]::new)); + String[] dummyHeader = new String[firstRow.size()]; + Arrays.fill(dummyHeader, ""); + return new Tuple<>(false, dummyHeader); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 6d1f039399e..4e692d58391 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -45,7 +45,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("time", "message"), structure.getInputFields()); + assertEquals(Arrays.asList("time", "message"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("time", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getTimestampFormats()); @@ -76,7 +76,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("message", "time", "count"), structure.getInputFields()); + assertEquals(Arrays.asList("message", "time", "count"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("time", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getTimestampFormats()); @@ -114,7 +114,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", "RatecodeID", "store_and_fwd_flag", "PULocationID", "DOLocationID", "payment_type", "fare_amount", "extra", "mta_tax", - "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount", "column18", "column19"), structure.getInputFields()); + "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount", "column18", "column19"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); 
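// The "column18" and "column19" entries asserted above are the names synthesized for the two blank trailing header cells in this sample: per the change in DelimitedFileStructureFinder, a header that is empty after optional trimming becomes "column" + (index + 1).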
assertEquals("tpep_pickup_datetime", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss"), structure.getTimestampFormats()); @@ -152,7 +152,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", "RatecodeID", "store_and_fwd_flag", "PULocationID", "DOLocationID", "payment_type", "fare_amount", "extra", "mta_tax", - "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount"), structure.getInputFields()); + "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("tpep_pickup_datetime", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss"), structure.getTimestampFormats()); @@ -183,7 +183,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getInputFields()); + assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("timestamp", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss.SSSSSS"), structure.getTimestampFormats()); @@ -213,7 +213,7 @@ public class DelimitedFileStructureFinderTests extends FileStructureTestCase { DelimitedFileStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertFalse(header.v1()); - assertThat(header.v2(), arrayContaining("column1", "column2", "column3", "column4")); + assertThat(header.v2(), arrayContaining("", "", "", "")); } public void testLevenshteinDistance() { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 4f9119df589..bb2ed76831d 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.monitoring.action.TransportMonitoringBulkAction; import org.elasticsearch.xpack.monitoring.cleaner.CleanerService; import org.elasticsearch.xpack.monitoring.collector.Collector; +import org.elasticsearch.xpack.monitoring.collector.ccr.CcrStatsCollector; import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStatsCollector; import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryCollector; import org.elasticsearch.xpack.monitoring.collector.indices.IndexStatsCollector; @@ -142,6 +143,7 @@ public class Monitoring extends Plugin implements ActionPlugin { collectors.add(new NodeStatsCollector(settings, clusterService, getLicenseState(), client)); collectors.add(new IndexRecoveryCollector(settings, clusterService, getLicenseState(), client)); collectors.add(new JobStatsCollector(settings, clusterService, getLicenseState(), client)); + collectors.add(new CcrStatsCollector(settings, clusterService, 
getLicenseState(), client)); final MonitoringService monitoringService = new MonitoringService(settings, clusterService, threadPool, collectors, exporters); @@ -179,6 +181,7 @@ public class Monitoring extends Plugin implements ActionPlugin { settings.add(IndexRecoveryCollector.INDEX_RECOVERY_ACTIVE_ONLY); settings.add(IndexStatsCollector.INDEX_STATS_TIMEOUT); settings.add(JobStatsCollector.JOB_STATS_TIMEOUT); + settings.add(CcrStatsCollector.CCR_STATS_TIMEOUT); settings.add(NodeStatsCollector.NODE_STATS_TIMEOUT); settings.addAll(Exporters.getSettings()); return Collections.unmodifiableList(settings); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java new file mode 100644 index 00000000000..510f430d196 --- /dev/null +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.collector.Collector; + +import java.util.Collection; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.monitoring.collector.ccr.CcrStatsMonitoringDoc.TYPE; + +public class CcrStatsCollector extends Collector { + + public static final Setting<TimeValue> CCR_STATS_TIMEOUT = collectionTimeoutSetting("ccr.stats.timeout"); + + private final ThreadContext threadContext; + private final CcrClient ccrClient; + + public CcrStatsCollector( + final Settings settings, + final ClusterService clusterService, + final XPackLicenseState licenseState, + final Client client) { + this(settings, clusterService, licenseState, new XPackClient(client).ccr(), client.threadPool().getThreadContext()); + } + + CcrStatsCollector( + final Settings settings, + final ClusterService clusterService, + final XPackLicenseState licenseState, + final CcrClient ccrClient, + final ThreadContext threadContext) { + super(settings, TYPE, clusterService, CCR_STATS_TIMEOUT, licenseState); + this.ccrClient = ccrClient; + this.threadContext = threadContext; + } + + @Override + protected boolean shouldCollect(final boolean isElectedMaster) { + // this can only run when monitoring is allowed and CCR is enabled and allowed, but also only on the
elected master node + return isElectedMaster + && super.shouldCollect(isElectedMaster) + && XPackSettings.CCR_ENABLED_SETTING.get(settings) + && licenseState.isCcrAllowed(); + } + + + @Override + protected Collection<MonitoringDoc> doCollect( + final MonitoringDoc.Node node, + final long interval, + final ClusterState clusterState) throws Exception { + try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) { + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); + request.setIndices(getCollectionIndices()); + request.setIndicesOptions(IndicesOptions.lenientExpandOpen()); + final CcrStatsAction.StatsResponses responses = ccrClient.stats(request).actionGet(getCollectionTimeout()); + + final long timestamp = timestamp(); + final String clusterUuid = clusterUuid(clusterState); + + return responses + .getStatsResponses() + .stream() + .map(stats -> new CcrStatsMonitoringDoc(clusterUuid, timestamp, interval, node, stats.status())) + .collect(Collectors.toList()); + } + } + +} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java new file mode 100644 index 00000000000..45c6a8607d4 --- /dev/null +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; + +import java.io.IOException; +import java.util.Objects; + +public class CcrStatsMonitoringDoc extends MonitoringDoc { + + public static final String TYPE = "ccr_stats"; + + private final ShardFollowNodeTaskStatus status; + + public ShardFollowNodeTaskStatus status() { + return status; + } + + public CcrStatsMonitoringDoc( + final String cluster, + final long timestamp, + final long intervalMillis, + final MonitoringDoc.Node node, + final ShardFollowNodeTaskStatus status) { + super(cluster, timestamp, intervalMillis, node, MonitoredSystem.ES, TYPE, null); + this.status = Objects.requireNonNull(status, "status"); + } + + + @Override + protected void innerToXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(TYPE); + { + status.toXContentFragment(builder, params); + } + builder.endObject(); + } + +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java new file mode 100644 index 00000000000..aaf3a61643b --- /dev/null +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; +import org.mockito.ArgumentMatcher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.monitoring.MonitoringTestUtils.randomMonitoringNode; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class CcrStatsCollectorTests extends BaseCollectorTestCase { + + public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + final boolean ccrAllowed = randomBoolean(); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + // this controls the blockage + when(licenseState.isMonitoringAllowed()).thenReturn(false); + when(licenseState.isCcrAllowed()).thenReturn(ccrAllowed); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsFalseIfNotMaster() { + // regardless of CCR being enabled + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + // this controls the blockage + final boolean isElectedMaster = false; + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + } + + public void testShouldCollectReturnsFalseIfCCRIsDisabled() { + // this controls the blockage + final Settings settings = ccrDisabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if
(isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsFalseIfCCRIsNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + // this controls the blockage + when(licenseState.isCcrAllowed()).thenReturn(false); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsTrue() { + final Settings settings = ccrEnabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(true); + when(licenseState.isCcrAllowed()).thenReturn(true); + final boolean isElectedMaster = true; + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(true)); + + verify(licenseState).isMonitoringAllowed(); + } + + public void testDoCollect() throws Exception { + final String clusterUuid = randomAlphaOfLength(5); + whenClusterStateWithUUID(clusterUuid); + + final MonitoringDoc.Node node = randomMonitoringNode(random()); + final CcrClient client = mock(CcrClient.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); + withCollectionTimeout(CcrStatsCollector.CCR_STATS_TIMEOUT, timeout); + + final CcrStatsCollector collector = new CcrStatsCollector(Settings.EMPTY, clusterService, licenseState, client, threadContext); + assertEquals(timeout, collector.getCollectionTimeout()); + + final List<CcrStatsAction.StatsResponse> statuses = mockStatuses(); + + @SuppressWarnings("unchecked") + final ActionFuture<CcrStatsAction.StatsResponses> future = (ActionFuture<CcrStatsAction.StatsResponses>)mock(ActionFuture.class); + final CcrStatsAction.StatsResponses responses = new CcrStatsAction.StatsResponses(emptyList(), emptyList(), statuses); + + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); + request.setIndices(Strings.EMPTY_ARRAY); + when(client.stats(statsRequestEq(request))).thenReturn(future); + when(future.actionGet(timeout)).thenReturn(responses); + + final long interval = randomNonNegativeLong(); + + final Collection<MonitoringDoc> documents = collector.doCollect(node, interval, clusterState); + verify(clusterState).metaData(); + verify(metaData).clusterUUID(); + + assertThat(documents, hasSize(statuses.size())); + + int index = 0; + for (final Iterator<MonitoringDoc> it = documents.iterator(); it.hasNext(); index++) { + final CcrStatsMonitoringDoc document = (CcrStatsMonitoringDoc)it.next(); + final CcrStatsAction.StatsResponse status = statuses.get(index); + + assertThat(document.getCluster(), is(clusterUuid)); + assertThat(document.getTimestamp(), greaterThan(0L)); + assertThat(document.getIntervalMillis(), equalTo(interval)); + assertThat(document.getNode(), equalTo(node)); + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(CcrStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.status(), is(status.status())); + } + } + + private List<CcrStatsAction.StatsResponse> mockStatuses() { + final int count = randomIntBetween(1, 8); + final List<CcrStatsAction.StatsResponse> statuses = new ArrayList<>(count); + + for (int i = 0; i < count; ++i) { +
CcrStatsAction.StatsResponse statsResponse = mock(CcrStatsAction.StatsResponse.class); + ShardFollowNodeTaskStatus status = mock(ShardFollowNodeTaskStatus.class); + when(statsResponse.status()).thenReturn(status); + statuses.add(statsResponse); + } + + return statuses; + } + + private Settings ccrEnabledSettings() { + // since it's the default, we want to ensure we test both with/without it + return randomBoolean() ? Settings.EMPTY : Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), true).build(); + } + + private Settings ccrDisabledSettings() { + return Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), false).build(); + } + + private static CcrStatsAction.StatsRequest statsRequestEq(CcrStatsAction.StatsRequest expected) { + return argThat(new StatsRequestMatches(expected)); + } + + private static class StatsRequestMatches extends ArgumentMatcher<CcrStatsAction.StatsRequest> { + + private final CcrStatsAction.StatsRequest expected; + + private StatsRequestMatches(CcrStatsAction.StatsRequest expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + CcrStatsAction.StatsRequest actual = (CcrStatsAction.StatsRequest) o; + return Arrays.equals(expected.indices(), actual.indices()); + } + } + +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java new file mode 100644 index 00000000000..70b73e5eed0 --- /dev/null +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java @@ -0,0 +1,177 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.exporter.BaseMonitoringDocTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; + +public class CcrStatsMonitoringDocTests extends BaseMonitoringDocTestCase<CcrStatsMonitoringDoc> { + + private ShardFollowNodeTaskStatus status; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + status = mock(ShardFollowNodeTaskStatus.class); + } + + public void testConstructorStatusMustNotBeNull() { + final NullPointerException e = + expectThrows(NullPointerException.class, () -> new CcrStatsMonitoringDoc(cluster, timestamp, interval, node, null)); + assertThat(e, hasToString(containsString("status"))); + } + + @Override + protected CcrStatsMonitoringDoc createMonitoringDoc( + final String cluster, + final long timestamp, + final long interval, + final MonitoringDoc.Node node, + final MonitoredSystem system, + final String type, + final String id) { + return new CcrStatsMonitoringDoc(cluster, timestamp, interval, node, status); + } + + @Override + protected void assertMonitoringDoc(CcrStatsMonitoringDoc document) { + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(CcrStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.status(), is(status)); + } + + @Override + public void testToXContent() throws IOException { + final long timestamp = System.currentTimeMillis(); + final long intervalMillis = System.currentTimeMillis(); + final long nodeTimestamp = System.currentTimeMillis(); + final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", nodeTimestamp); + // these random values do not need to be internally consistent, they are only for testing formatting + final int shardId = randomIntBetween(0, Integer.MAX_VALUE); + final long leaderGlobalCheckpoint = randomNonNegativeLong(); + final long leaderMaxSeqNo = randomNonNegativeLong(); + final long followerGlobalCheckpoint = randomNonNegativeLong(); + final long followerMaxSeqNo = randomNonNegativeLong(); + final long lastRequestedSeqNo = randomNonNegativeLong(); + final int numberOfConcurrentReads = randomIntBetween(1, Integer.MAX_VALUE); + final int numberOfConcurrentWrites = randomIntBetween(1, Integer.MAX_VALUE); + final int numberOfQueuedWrites = randomIntBetween(0, Integer.MAX_VALUE); + final long mappingVersion = randomIntBetween(0, Integer.MAX_VALUE); + final long totalFetchTimeMillis = randomLongBetween(0, 4096); + final long numberOfSuccessfulFetches = randomNonNegativeLong(); + final long numberOfFailedFetches = randomLongBetween(0, 8); + final long
operationsReceived = randomNonNegativeLong(); + final long totalTransferredBytes = randomNonNegativeLong(); + final long totalIndexTimeMillis = randomNonNegativeLong(); + final long numberOfSuccessfulBulkOperations = randomNonNegativeLong(); + final long numberOfFailedBulkOperations = randomNonNegativeLong(); + final long numberOfOperationsIndexed = randomNonNegativeLong(); + final NavigableMap<Long, ElasticsearchException> fetchExceptions = + new TreeMap<>(Collections.singletonMap(randomNonNegativeLong(), new ElasticsearchException("shard is sad"))); + final long timeSinceLastFetchMillis = randomNonNegativeLong(); + final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + "cluster_alias:leader_index", + "follower_index", + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + totalIndexTimeMillis, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + fetchExceptions, + timeSinceLastFetchMillis); + final CcrStatsMonitoringDoc document = new CcrStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, status); + final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); + assertThat( + xContent.utf8ToString(), + equalTo( + "{" + + "\"cluster_uuid\":\"_cluster\"," + + "\"timestamp\":\"" + new DateTime(timestamp, DateTimeZone.UTC).toString() + "\"," + + "\"interval_ms\":" + intervalMillis + "," + + "\"type\":\"ccr_stats\"," + + "\"source_node\":{" + + "\"uuid\":\"_uuid\"," + + "\"host\":\"_host\"," + + "\"transport_address\":\"_addr\"," + + "\"ip\":\"_ip\"," + + "\"name\":\"_name\"," + + "\"timestamp\":\"" + new DateTime(nodeTimestamp, DateTimeZone.UTC).toString() + "\"" + + "}," + + "\"ccr_stats\":{" + + "\"leader_index\":\"cluster_alias:leader_index\"," + + "\"follower_index\":\"follower_index\"," + + "\"shard_id\":" + shardId + "," + + "\"leader_global_checkpoint\":" + leaderGlobalCheckpoint + "," + + "\"leader_max_seq_no\":" + leaderMaxSeqNo + "," + + "\"follower_global_checkpoint\":" + followerGlobalCheckpoint + "," + + "\"follower_max_seq_no\":" + followerMaxSeqNo + "," + + "\"last_requested_seq_no\":" + lastRequestedSeqNo + "," + + "\"number_of_concurrent_reads\":" + numberOfConcurrentReads + "," + + "\"number_of_concurrent_writes\":" + numberOfConcurrentWrites + "," + + "\"number_of_queued_writes\":" + numberOfQueuedWrites + "," + + "\"mapping_version\":" + mappingVersion + "," + + "\"total_fetch_time_millis\":" + totalFetchTimeMillis + "," + + "\"number_of_successful_fetches\":" + numberOfSuccessfulFetches + "," + + "\"number_of_failed_fetches\":" + numberOfFailedFetches + "," + + "\"operations_received\":" + operationsReceived + "," + + "\"total_transferred_bytes\":" + totalTransferredBytes + "," + + "\"total_index_time_millis\":" + totalIndexTimeMillis +"," + + "\"number_of_successful_bulk_operations\":" + numberOfSuccessfulBulkOperations + "," + + "\"number_of_failed_bulk_operations\":" + numberOfFailedBulkOperations + "," + + "\"number_of_operations_indexed\":" + numberOfOperationsIndexed + "," + + "\"fetch_exceptions\":[" + + "{" + + "\"from_seq_no\":" + fetchExceptions.keySet().iterator().next() + "," + + "\"exception\":{" + + "\"type\":\"exception\"," + + "\"reason\":\"shard is sad\"" + + "}" + + "}" + + "]," + + "\"time_since_last_fetch_millis\":" + timeSinceLastFetchMillis + + "}" + + "}")); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 363cc7bb882..42a2ad767d3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -971,7 +971,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw public Function<String, Predicate<String>> getFieldFilter() { if (enabled) { return index -> { - if (getLicenseState().isSecurityEnabled() == false || getLicenseState().isDocumentAndFieldLevelSecurityAllowed() == false) { + if (getLicenseState().isDocumentAndFieldLevelSecurityAllowed() == false) { return MapperPlugin.NOOP_FIELD_PREDICATE; } IndicesAccessControl indicesAccessControl = threadContext.get().getTransient( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index ab70b8513de..6f357790d2f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -76,7 +76,11 @@ public class SecurityFeatureSet implements XPackFeatureSet { @Override public boolean enabled() { - return licenseState != null && licenseState.isSecurityEnabled(); + if (licenseState != null) { + return XPackSettings.SECURITY_ENABLED.get(settings) && + licenseState.isSecurityDisabledByTrialLicense() == false; + } + return false; } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java index 353b4b9729b..3e1f9f97c2f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java @@ -72,7 +72,6 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) { - /* A functional requirement - when the license of security is disabled (invalid/expires), security will continue to operate normally, except all read operations will be blocked.
@@ -84,8 +83,7 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil throw LicenseUtils.newComplianceException(XPackField.SECURITY); } - final boolean securityEnabled = licenseState.isSecurityEnabled(); - if (securityEnabled && licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed()) { final ActionListener contextPreservingListener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); ActionListener authenticatedListener = ActionListener.wrap( @@ -117,7 +115,7 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil listener.onFailure(e); } } else if (SECURITY_ACTION_MATCHER.test(action)) { - if (securityEnabled == false && licenseState.isTrialLicense()) { + if (licenseState.isSecurityDisabledByTrialLicense()) { listener.onFailure(new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + "and restart the node.")); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java index cbcdce98eaa..abdaba7cf29 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java @@ -37,25 +37,25 @@ public class BulkShardRequestInterceptor extends AbstractComponent implements Re @Override public void intercept(BulkShardRequest request, Authentication authentication, Role userPermissions, String action) { - if (licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { - return; - } - IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); - for (BulkItemRequest bulkItemRequest : request.items()) { - IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(bulkItemRequest.index()); - if (indexAccessControl != null) { - boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); - boolean dls = indexAccessControl.getQueries() != null; - if (fls || dls) { - if (bulkItemRequest.request() instanceof UpdateRequest) { - throw new ElasticsearchSecurityException("Can't execute a bulk request with update requests embedded if " + + for (BulkItemRequest bulkItemRequest : request.items()) { + IndicesAccessControl.IndexAccessControl indexAccessControl = + indicesAccessControl.getIndexPermissions(bulkItemRequest.index()); + if (indexAccessControl != null) { + boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + boolean dls = indexAccessControl.getQueries() != null; + if (fls || dls) { + if (bulkItemRequest.request() instanceof UpdateRequest) { + throw new ElasticsearchSecurityException("Can't execute a bulk request with update requests embedded if " + "field or document level security is enabled", RestStatus.BAD_REQUEST); + } } } - } - logger.trace("intercepted bulk request for index [{}] without any update 
requests, continuing execution", + logger.trace("intercepted bulk request for index [{}] without any update requests, continuing execution", + bulkItemRequest.index()); + } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java index 5116e9b09f8..5f6f4d1643b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java @@ -34,26 +34,25 @@ abstract class FieldAndDocumentLevelSecurityRequestInterceptor permissionsMap = new HashMap<>(); - for (IndicesAliasesRequest.AliasActions aliasAction : request.getAliasActions()) { - if (aliasAction.actionType() == IndicesAliasesRequest.AliasActions.Type.ADD) { - for (String index : aliasAction.indices()) { - Automaton indexPermissions = permissionsMap.computeIfAbsent(index, userPermissions.indices()::allowedActionsMatcher); - for (String alias : aliasAction.aliases()) { - Automaton aliasPermissions = + Map<String, Automaton> permissionsMap = new HashMap<>(); + for (IndicesAliasesRequest.AliasActions aliasAction : request.getAliasActions()) { + if (aliasAction.actionType() == IndicesAliasesRequest.AliasActions.Type.ADD) { + for (String index : aliasAction.indices()) { + Automaton indexPermissions = + permissionsMap.computeIfAbsent(index, userPermissions.indices()::allowedActionsMatcher); + for (String alias : aliasAction.aliases()) { + Automaton aliasPermissions = permissionsMap.computeIfAbsent(alias, userPermissions.indices()::allowedActionsMatcher); - if (Operations.subsetOf(aliasPermissions, indexPermissions) == false) { - // TODO we've already audited an access granted event so this is going to look ugly - auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); - throw Exceptions.authorizationError("Adding an alias is not allowed when the alias " + + if (Operations.subsetOf(aliasPermissions, indexPermissions) == false) { + // TODO we've already audited an access granted event so this is going to look ugly + auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); + throw Exceptions.authorizationError("Adding an alias is not allowed when the alias " + "has more permissions than any of the indices"); + } } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java index a4d5eecb92f..255f46cb02c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java @@ -39,31 +39,33 @@ public final class ResizeRequestInterceptor extends AbstractComponent implements @Override public void intercept(ResizeRequest request, Authentication authentication, Role userPermissions, String action) { - if (licenseState.isSecurityEnabled() == false) { - return; - } - - if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { - IndicesAccessControl
indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); - IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(request.getSourceIndex()); - if (indexAccessControl != null) { - final boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); - final boolean dls = indexAccessControl.getQueries() != null; - if (fls || dls) { - throw new ElasticsearchSecurityException("Resize requests are not allowed for users when " + + final XPackLicenseState frozenLicenseState = licenseState.copyCurrentLicenseState(); + if (frozenLicenseState.isAuthAllowed()) { + if (frozenLicenseState.isDocumentAndFieldLevelSecurityAllowed()) { + IndicesAccessControl indicesAccessControl = + threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + IndicesAccessControl.IndexAccessControl indexAccessControl = + indicesAccessControl.getIndexPermissions(request.getSourceIndex()); + if (indexAccessControl != null) { + final boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + final boolean dls = indexAccessControl.getQueries() != null; + if (fls || dls) { + throw new ElasticsearchSecurityException("Resize requests are not allowed for users when " + "field or document level security is enabled on the source index", RestStatus.BAD_REQUEST); + } } } - } - // ensure that the user would have the same level of access OR less on the target index - final Automaton sourceIndexPermissions = userPermissions.indices().allowedActionsMatcher(request.getSourceIndex()); - final Automaton targetIndexPermissions = userPermissions.indices().allowedActionsMatcher(request.getTargetIndexRequest().index()); - if (Operations.subsetOf(targetIndexPermissions, sourceIndexPermissions) == false) { - // TODO we've already audited an access granted event so this is going to look ugly - auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); - throw Exceptions.authorizationError("Resizing an index is not allowed when the target index " + + // ensure that the user would have the same level of access OR less on the target index + final Automaton sourceIndexPermissions = userPermissions.indices().allowedActionsMatcher(request.getSourceIndex()); + final Automaton targetIndexPermissions = + userPermissions.indices().allowedActionsMatcher(request.getTargetIndexRequest().index()); + if (Operations.subsetOf(targetIndexPermissions, sourceIndexPermissions) == false) { + // TODO we've already audited an access granted event so this is going to look ugly + auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); + throw Exceptions.authorizationError("Resizing an index is not allowed when the target index " + "has more permissions than the source index"); + } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java index 3cd12b1a7ce..e36dee3d67c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java @@ -42,7 +42,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationSuccess(String realm, User user, RestRequest request) { - if (licenseState.isSecurityEnabled()
&& licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationSuccess(realm, user, request); } @@ -51,7 +51,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationSuccess(String realm, User user, String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationSuccess(realm, user, action, message); } @@ -60,7 +60,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void anonymousAccessDenied(String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.anonymousAccessDenied(action, message); } @@ -69,7 +69,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void anonymousAccessDenied(RestRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.anonymousAccessDenied(request); } @@ -78,7 +78,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationFailed(RestRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(request); } @@ -87,7 +87,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationFailed(String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(action, message); } @@ -96,7 +96,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationFailed(AuthenticationToken token, String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(token, action, message); } @@ -105,7 +105,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(realm, token, action, message); } @@ -114,7 +114,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void authenticationFailed(AuthenticationToken token, RestRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(token, request); } @@ -123,7 +123,7 @@ public class AuditTrailService extends AbstractComponent 
implements AuditTrail { @Override public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.authenticationFailed(realm, token, request); } @@ -132,7 +132,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void accessGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.accessGranted(authentication, action, message, roleNames); } @@ -141,7 +141,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void accessDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.accessDenied(authentication, action, message, roleNames); } @@ -150,7 +150,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void tamperedRequest(RestRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.tamperedRequest(request); } @@ -159,7 +159,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void tamperedRequest(String action, TransportMessage message) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.tamperedRequest(action, message); } @@ -168,7 +168,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void tamperedRequest(User user, String action, TransportMessage request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.tamperedRequest(user, action, request); } @@ -177,7 +177,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.connectionGranted(inetAddress, profile, rule); } @@ -186,7 +186,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.connectionDenied(inetAddress, profile, rule); } @@ -195,7 +195,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void runAsGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { - 
if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.runAsGranted(authentication, action, message, roleNames); } @@ -204,7 +204,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void runAsDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.runAsDenied(authentication, action, message, roleNames); } @@ -213,7 +213,7 @@ public class AuditTrailService extends AbstractComponent implements AuditTrail { @Override public void runAsDenied(Authentication authentication, RestRequest request, String[] roleNames) { - if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + if (licenseState.isAuditingAllowed()) { for (AuditTrail auditTrail : auditTrails) { auditTrail.runAsDenied(authentication, request, roleNames); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index d2573b9343d..ce45ee2bedf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -98,7 +98,7 @@ public class Realms extends AbstractComponent implements Iterable<Realm> { @Override public Iterator<Realm> iterator() { - if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + if (licenseState.isAuthAllowed() == false) { return Collections.emptyIterator(); } @@ -120,7 +120,7 @@ public class Realms extends AbstractComponent implements Iterable<Realm> { } public List<Realm> asList() { - if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + if (licenseState.isAuthAllowed() == false) { return Collections.emptyList(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java index 6658d095b9c..e3121c9512d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java @@ -45,7 +45,7 @@ public final class SecuritySearchOperationListener implements SearchOperationLis */ @Override public void onNewScrollContext(SearchContext searchContext) { - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed()) { searchContext.scrollContext().putInContext(AuthenticationField.AUTHENTICATION_KEY, Authentication.getAuthentication(threadContext)); } @@ -57,7 +57,7 @@ public final class SecuritySearchOperationListener implements SearchOperationLis */ @Override public void validateSearchContext(SearchContext searchContext, TransportRequest request) { - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed()) { if (searchContext.scrollContext() != null) { final Authentication originalAuth = 
searchContext.scrollContext().getFromContext(AuthenticationField.AUTHENTICATION_KEY); final Authentication current = Authentication.getAuthentication(threadContext); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java index a49bfdfbe16..1ace72a1da0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java @@ -59,8 +59,7 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Qu @Override public Weight doCache(Weight weight, QueryCachingPolicy policy) { - // TODO: this is not concurrently safe since the license state can change between reads - if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + if (licenseState.isAuthAllowed() == false) { logger.debug("not opting out of the query cache; authorization is not allowed"); return indicesQueryCache.doCache(weight, policy); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java index 8d304302e03..7b14f218c43 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java @@ -46,7 +46,7 @@ public class SecurityRestFilter implements RestHandler { @Override public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed() && request.method() != Method.OPTIONS) { + if (licenseState.isAuthAllowed() && request.method() != Method.OPTIONS) { // CORS - allow for preflight unauthenticated OPTIONS request if (extractClientCertificate) { HttpChannel httpChannel = request.getHttpChannel(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java index 9006ec620b5..dd1e387b989 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java @@ -14,6 +14,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; import java.io.IOException; @@ -64,16 +65,14 @@ public abstract class SecurityBaseRestHandler extends BaseRestHandler { * sent to the requestor */ protected Exception checkFeatureAvailable(RestRequest request) { - if (licenseState.isSecurityAvailable() == false) { + if (XPackSettings.SECURITY_ENABLED.get(settings) == false) { + return new IllegalStateException("Security is not enabled but a security rest handler is registered"); + } else if (licenseState.isSecurityAvailable() == false) { return LicenseUtils.newComplianceException(XPackField.SECURITY); - } else if 
(licenseState.isSecurityEnabled() == false) { - if (licenseState.isTrialLicense()) { - return new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + - "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + - "and restart the node."); - } else { - return new IllegalStateException("Security is not enabled but a security rest handler is registered"); - } + } else if (licenseState.isSecurityDisabledByTrialLicense()) { + return new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + + "and restart the node."); } else { return null; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 3b761522fa7..14081e136d3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -107,7 +107,7 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem // guarantee we use the same value wherever we would check the value for the state // being recovered final boolean stateNotRecovered = isStateNotRecovered; - final boolean sendWithAuth = (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) || stateNotRecovered; + final boolean sendWithAuth = licenseState.isAuthAllowed() || stateNotRecovered; if (sendWithAuth) { // the transport in core normally does this check, BUT since we are serializing to a string header we need to do it // ourselves otherwise we wind up using a version newer than what we can actually send @@ -266,7 +266,7 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { final AbstractRunnable receiveMessage = getReceiveRunnable(request, channel, task); try (ThreadContext.StoredContext ctx = threadContext.newStoredContext(true)) { - if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + if (licenseState.isAuthAllowed()) { String profile = channel.getProfileName(); ServerTransportFilter filter = profileFilters.get(profile); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java index 586e9cd6507..860d6bb69b6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java @@ -198,7 +198,7 @@ public class IPFilter { } public boolean accept(String profile, InetSocketAddress peerAddress) { - if (licenseState.isSecurityEnabled() == false || licenseState.isIpFilteringAllowed() == false) { + if (licenseState.isIpFilteringAllowed() == false) { return true; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index 7a35b0bc422..ad1bb7be95c 100644 --- 
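// The SecurityBaseRestHandler hunk above reorders the availability checks: the node-level
// xpack.security.enabled setting is consulted before any license state, and the trial-license
// case is now detected via isSecurityDisabledByTrialLicense() instead of isTrialLicense() plus
// isSecurityEnabled(). A condensed sketch of the resulting method, consolidating the added
// lines of that hunk:
protected Exception checkFeatureAvailable(RestRequest request) {
    if (XPackSettings.SECURITY_ENABLED.get(settings) == false) {
        // a security handler should never have been registered on a node with security off
        return new IllegalStateException("Security is not enabled but a security rest handler is registered");
    } else if (licenseState.isSecurityAvailable() == false) {
        // the current license tier does not include security at all
        return LicenseUtils.newComplianceException(XPackField.SECURITY);
    } else if (licenseState.isSecurityDisabledByTrialLicense()) {
        return new ElasticsearchException("Security must be explicitly enabled when using a trial license. "
                + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file "
                + "and restart the node.");
    } else {
        return null; // feature available, request handling may proceed
    }
}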
a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsIndices; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; @@ -52,8 +53,11 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -238,7 +242,7 @@ public class LicensingTests extends SecurityIntegTestCase { License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, License.OperationMode.PLATINUM, License.OperationMode.STANDARD); enableLicensing(mode); - // security actions should not work! + // security actions should work! try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); GetUsersResponse response = new SecurityClient(client).prepareGetUsers().get(); @@ -278,6 +282,10 @@ public class LicensingTests extends SecurityIntegTestCase { enableLicensing(mode); ensureGreen(); + final List<String> unicastHostsList = internalCluster().masterClient().admin().cluster().nodesInfo(new NodesInfoRequest()).get() + .getNodes().stream().map(n -> n.getTransport().getAddress().publishAddress().toString()).distinct() + .collect(Collectors.toList()); + Path home = createTempDir(); Path conf = home.resolve("config"); Files.createDirectories(conf); @@ -291,7 +299,8 @@ public class LicensingTests extends SecurityIntegTestCase { .put("path.home", home) .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false) .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "test-zen") - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "test-zen") + .putList(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey()) + .putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), unicastHostsList) .build(); Collection<Class<? extends Plugin>> mockPlugins = Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index 076ce6c9fcb..2944cd3134a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -55,7 +55,6 @@ public class SecurityFeatureSetTests extends ESTestCase { public void init() 
throws Exception { settings = Settings.builder().put("path.home", createTempDir()).build(); licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); realms = mock(Realms.class); ipFilter = mock(IPFilter.class); rolesStore = mock(CompositeRolesStore.class); @@ -77,7 +76,7 @@ public class SecurityFeatureSetTests extends ESTestCase { rolesStore, roleMappingStore, ipFilter); assertThat(featureSet.enabled(), is(true)); - when(licenseState.isSecurityEnabled()).thenReturn(false); + when(licenseState.isSecurityDisabledByTrialLicense()).thenReturn(true); featureSet = new SecurityFeatureSet(settings, licenseState, realms, rolesStore, roleMappingStore, ipFilter); assertThat(featureSet.enabled(), is(false)); @@ -90,7 +89,7 @@ public class SecurityFeatureSetTests extends ESTestCase { Settings.Builder settings = Settings.builder().put(this.settings); boolean enabled = randomBoolean(); - when(licenseState.isSecurityEnabled()).thenReturn(enabled); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), enabled); final boolean httpSSLEnabled = randomBoolean(); settings.put("xpack.security.http.ssl.enabled", httpSSLEnabled); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 577c7ddb249..93df605a74f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -67,7 +67,6 @@ public class SecurityActionFilterTests extends ESTestCase { licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isStatsAndHealthAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); ThreadPool threadPool = mock(ThreadPool.class); threadContext = new ThreadContext(Settings.EMPTY); when(threadPool.getThreadContext()).thenReturn(threadContext); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java index 7c951c0014e..a5798be9746 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java @@ -35,7 +35,8 @@ public class IndicesAliasesRequestInterceptorTests extends ESTestCase { public void testInterceptorThrowsWhenFLSDLSEnabled() { XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.copyCurrentLicenseState()).thenReturn(licenseState); + when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); @@ -81,7 +82,8 @@ public class IndicesAliasesRequestInterceptorTests extends ESTestCase { public void testInterceptorThrowsWhenTargetHasGreaterPermissions() throws Exception { XPackLicenseState licenseState 
= mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.copyCurrentLicenseState()).thenReturn(licenseState); + when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java index f1363214b07..008928794db 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java @@ -37,7 +37,8 @@ public class ResizeRequestInterceptorTests extends ESTestCase { public void testResizeRequestInterceptorThrowsWhenFLSDLSEnabled() { XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.copyCurrentLicenseState()).thenReturn(licenseState); + when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadPool threadPool = mock(ThreadPool.class); @@ -76,7 +77,8 @@ public class ResizeRequestInterceptorTests extends ESTestCase { public void testResizeRequestInterceptorThrowsWhenTargetHasGreaterPermissions() throws Exception { XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.copyCurrentLicenseState()).thenReturn(licenseState); + when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadPool threadPool = mock(ThreadPool.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java index b346fc6857e..13a7e5c3cf7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java @@ -48,7 +48,6 @@ public class AuditTrailServiceTests extends ESTestCase { licenseState = mock(XPackLicenseState.class); service = new AuditTrailService(Settings.EMPTY, auditTrails, licenseState); isAuditingAllowed = randomBoolean(); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuditingAllowed()).thenReturn(isAuditingAllowed); token = mock(AuthenticationToken.class); message = mock(TransportMessage.class); @@ -58,7 +57,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailed() throws Exception { service.authenticationFailed(token, "_action", message); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed(token, "_action", message); @@ -71,7 +69,6 @@ public class 
AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailedNoToken() throws Exception { service.authenticationFailed("_action", message); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed("_action", message); @@ -84,7 +81,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailedRestNoToken() throws Exception { service.authenticationFailed(restRequest); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed(restRequest); @@ -97,7 +93,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailedRest() throws Exception { service.authenticationFailed(token, restRequest); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed(token, restRequest); @@ -110,7 +105,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailedRealm() throws Exception { service.authenticationFailed("_realm", token, "_action", message); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed("_realm", token, "_action", message); @@ -123,7 +117,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAuthenticationFailedRestRealm() throws Exception { service.authenticationFailed("_realm", token, restRequest); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationFailed("_realm", token, restRequest); @@ -136,7 +129,6 @@ public class AuditTrailServiceTests extends ESTestCase { public void testAnonymousAccess() throws Exception { service.anonymousAccessDenied("_action", message); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).anonymousAccessDenied("_action", message); @@ -152,7 +144,6 @@ public class AuditTrailServiceTests extends ESTestCase { String[] roles = new String[] { randomAlphaOfLengthBetween(1, 6) }; service.accessGranted(authentication, "_action", message, roles); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).accessGranted(authentication, "_action", message, roles); @@ -168,7 +159,6 @@ public class AuditTrailServiceTests extends ESTestCase { String[] roles = new String[] { randomAlphaOfLengthBetween(1, 6) }; service.accessDenied(authentication, "_action", message, roles); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).accessDenied(authentication, "_action", message, roles); @@ -183,7 +173,6 @@ public class AuditTrailServiceTests extends ESTestCase { SecurityIpFilterRule rule = randomBoolean() ? 
SecurityIpFilterRule.ACCEPT_ALL : IPFilter.DEFAULT_PROFILE_ACCEPT_ALL; service.connectionGranted(inetAddress, "client", rule); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).connectionGranted(inetAddress, "client", rule); @@ -198,7 +187,6 @@ public class AuditTrailServiceTests extends ESTestCase { SecurityIpFilterRule rule = new SecurityIpFilterRule(false, "_all"); service.connectionDenied(inetAddress, "client", rule); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).connectionDenied(inetAddress, "client", rule); @@ -213,7 +201,6 @@ public class AuditTrailServiceTests extends ESTestCase { String realm = "_realm"; service.authenticationSuccess(realm, user, restRequest); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationSuccess(realm, user, restRequest); @@ -228,7 +215,6 @@ public class AuditTrailServiceTests extends ESTestCase { String realm = "_realm"; service.authenticationSuccess(realm, user, "_action", message); verify(licenseState).isAuditingAllowed(); - verify(licenseState).isSecurityEnabled(); if (isAuditingAllowed) { for (AuditTrail auditTrail : auditTrails) { verify(auditTrail).authenticationSuccess(realm, user, "_action", message); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 1640ab727fe..65f69b397ba 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -154,7 +154,6 @@ public class AuthenticationServiceTests extends ESTestCase { XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.allowedRealmType()).thenReturn(XPackLicenseState.AllowedRealmType.ALL); when(licenseState.isAuthAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); realms = new TestRealms(Settings.EMPTY, TestEnvironment.newEnvironment(settings), Collections.emptyMap(), licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm, secondRealm), Collections.singletonList(firstRealm)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 9d795826298..c5fbb39fee6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -69,7 +69,6 @@ public class RealmsTests extends ESTestCase { threadContext = new ThreadContext(Settings.EMPTY); reservedRealm = mock(ReservedRealm.class); when(licenseState.isAuthAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.ALL); when(reservedRealm.type()).thenReturn(ReservedRealm.TYPE); } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java index fac88e8af09..91d61e1ca5c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java @@ -39,7 +39,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { public void testUnlicensed() { XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuthAllowed()).thenReturn(false); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); AuditTrailService auditTrailService = mock(AuditTrailService.class); @@ -49,7 +48,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { SecuritySearchOperationListener listener = new SecuritySearchOperationListener(threadContext, licenseState, auditTrailService); listener.onNewScrollContext(searchContext); listener.validateSearchContext(searchContext, Empty.INSTANCE); - verify(licenseState, times(2)).isSecurityEnabled(); verify(licenseState, times(2)).isAuthAllowed(); verifyZeroInteractions(auditTrailService, searchContext); } @@ -60,7 +58,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { final Scroll scroll = new Scroll(TimeValue.timeValueSeconds(2L)); testSearchContext.scrollContext().scroll = scroll; XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuthAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); AuditTrailService auditTrailService = mock(AuditTrailService.class); @@ -75,7 +72,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { assertEquals(scroll, testSearchContext.scrollContext().scroll); verify(licenseState).isAuthAllowed(); - verify(licenseState).isSecurityEnabled(); verifyZeroInteractions(auditTrailService); } @@ -86,7 +82,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null)); testSearchContext.scrollContext().scroll = new Scroll(TimeValue.timeValueSeconds(2L)); XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuthAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); AuditTrailService auditTrailService = mock(AuditTrailService.class); @@ -97,7 +92,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { authentication.writeToContext(threadContext); listener.validateSearchContext(testSearchContext, Empty.INSTANCE); verify(licenseState).isAuthAllowed(); - verify(licenseState).isSecurityEnabled(); verifyZeroInteractions(auditTrailService); } @@ -108,7 +102,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { authentication.writeToContext(threadContext); listener.validateSearchContext(testSearchContext, Empty.INSTANCE); verify(licenseState, times(2)).isAuthAllowed(); - verify(licenseState, times(2)).isSecurityEnabled(); verifyZeroInteractions(auditTrailService); } @@ -125,7 +118,6 @@ public class 
SecuritySearchOperationListenerTests extends ESTestCase { expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); assertEquals(testSearchContext.id(), expected.id()); verify(licenseState, times(3)).isAuthAllowed(); - verify(licenseState, times(3)).isSecurityEnabled(); verify(auditTrailService).accessDenied(authentication, "action", request, authentication.getUser().roles()); } @@ -142,7 +134,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); listener.validateSearchContext(testSearchContext, request); verify(licenseState, times(4)).isAuthAllowed(); - verify(licenseState, times(4)).isSecurityEnabled(); verifyNoMoreInteractions(auditTrailService); } @@ -161,7 +152,6 @@ public class SecuritySearchOperationListenerTests extends ESTestCase { expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); assertEquals(testSearchContext.id(), expected.id()); verify(licenseState, times(5)).isAuthAllowed(); - verify(licenseState, times(5)).isSecurityEnabled(); verify(auditTrailService).accessDenied(authentication, "action", request, authentication.getUser().roles()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java index efe154f8d78..d2b6c736fd8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -48,7 +48,7 @@ public class OptOutQueryCacheTests extends ESTestCase { DirectoryReader reader; @Before - void initLuceneStuff() throws IOException { + public void initLuceneStuff() throws IOException { dir = newDirectory(); w = new RandomIndexWriter(random(), dir); reader = w.getReader(); @@ -56,11 +56,12 @@ public class OptOutQueryCacheTests extends ESTestCase { } @After - void closeLuceneStuff() throws IOException { + public void closeLuceneStuff() throws IOException { w.close(); dir.close(); reader.close(); } + public void testOptOutQueryCacheSafetyCheck() throws IOException { BooleanQuery.Builder builder = new BooleanQuery.Builder(); @@ -123,25 +124,6 @@ public class OptOutQueryCacheTests extends ESTestCase { assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); } - public void testOptOutQueryCacheSecurityIsNotEnabled() { - final Settings.Builder settings = Settings.builder() - .put("index.version.created", Version.CURRENT) - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 0); - final IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); - final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(false); - when(licenseState.isAuthAllowed()).thenReturn(randomBoolean()); - final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); - final Weight weight = 
mock(Weight.class); - final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); - cache.doCache(weight, policy); - verify(indicesQueryCache).doCache(same(weight), same(policy)); - } - public void testOptOutQueryCacheAuthIsNotAllowed() { final Settings.Builder settings = Settings.builder() .put("index.version.created", Version.CURRENT) @@ -152,7 +134,6 @@ public class OptOutQueryCacheTests extends ESTestCase { final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(randomBoolean()); when(licenseState.isAuthAllowed()).thenReturn(false); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); final Weight weight = mock(Weight.class); @@ -171,7 +152,6 @@ public class OptOutQueryCacheTests extends ESTestCase { final IndicesQueryCache indicesQueryCache = mock(IndicesQueryCache.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); final Weight weight = mock(Weight.class); @@ -196,7 +176,6 @@ public class OptOutQueryCacheTests extends ESTestCase { when(indicesAccessControl.getIndexPermissions("index")).thenReturn(indexAccessControl); threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); final XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); final Weight weight = mock(Weight.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java index 5db634c8d7b..4c0ca977a21 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java @@ -60,7 +60,6 @@ public class SecurityRestFilterTests extends ESTestCase { channel = mock(RestChannel.class); licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); restHandler = mock(RestHandler.class); filter = new SecurityRestFilter(licenseState, new ThreadContext(Settings.EMPTY), authcService, restHandler, false); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java index c78d0a64745..4ff582f01bd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java @@ -24,11 +24,11 @@ import static 
org.mockito.Mockito.when; public class SecurityBaseRestHandlerTests extends ESTestCase { public void testSecurityBaseRestHandlerChecksLicenseState() throws Exception { - final boolean securityEnabled = randomBoolean(); + final boolean securityDisabledByTrial = randomBoolean(); final AtomicBoolean consumerCalled = new AtomicBoolean(false); final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isSecurityAvailable()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(securityEnabled); + when(licenseState.isSecurityDisabledByTrialLicense()).thenReturn(securityDisabledByTrial); SecurityBaseRestHandler handler = new SecurityBaseRestHandler(Settings.EMPTY, licenseState) { @Override @@ -46,7 +46,7 @@ public class SecurityBaseRestHandlerTests extends ESTestCase { } }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityEnabled ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityDisabledByTrial ? 1 : 0); NodeClient client = mock(NodeClient.class); assertFalse(consumerCalled.get()); @@ -54,8 +54,7 @@ public class SecurityBaseRestHandlerTests extends ESTestCase { handler.handleRequest(fakeRestRequest, fakeRestChannel, client); verify(licenseState).isSecurityAvailable(); - verify(licenseState).isSecurityEnabled(); - if (securityEnabled) { + if (securityDisabledByTrial == false) { assertTrue(consumerCalled.get()); assertEquals(0, fakeRestChannel.responses().get()); assertEquals(0, fakeRestChannel.errors().get()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index dd7dda48ae8..a7351ccfe14 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -73,7 +73,6 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { securityContext = spy(new SecurityContext(settings, threadPool.getThreadContext())); xPackLicenseState = mock(XPackLicenseState.class); when(xPackLicenseState.isAuthAllowed()).thenReturn(true); - when(xPackLicenseState.isSecurityEnabled()).thenReturn(true); } @After @@ -102,7 +101,6 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { sender.sendRequest(null, null, null, null, null); assertTrue(calledWrappedSender.get()); verify(xPackLicenseState).isAuthAllowed(); - verify(xPackLicenseState).isSecurityEnabled(); verifyNoMoreInteractions(xPackLicenseState); verifyZeroInteractions(securityContext); } @@ -112,10 +110,8 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))), clusterService); - final boolean securityEnabled = randomBoolean(); - final boolean authAllowed = securityEnabled && randomBoolean(); + final boolean authAllowed = randomBoolean(); when(xPackLicenseState.isAuthAllowed()).thenReturn(authAllowed); - 
when(xPackLicenseState.isSecurityEnabled()).thenReturn(securityEnabled); ClusterState notRecovered = ClusterState.builder(clusterService.state()) .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build()) .build(); @@ -139,10 +135,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { sender.sendRequest(connection, "internal:foo", null, null, null); assertTrue(calledWrappedSender.get()); assertEquals(SystemUser.INSTANCE, sendingUser.get()); - verify(xPackLicenseState).isSecurityEnabled(); - if (securityEnabled) { - verify(xPackLicenseState).isAuthAllowed(); - } + verify(xPackLicenseState).isAuthAllowed(); verify(securityContext).executeAsUser(any(User.class), any(Consumer.class), eq(Version.CURRENT)); verifyNoMoreInteractions(xPackLicenseState); } @@ -177,7 +170,6 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { assertEquals(user, sendingUser.get()); assertEquals(user, securityContext.getUser()); verify(xPackLicenseState).isAuthAllowed(); - verify(xPackLicenseState).isSecurityEnabled(); verify(securityContext, never()).executeAsUser(any(User.class), any(Consumer.class), any(Version.class)); verifyNoMoreInteractions(xPackLicenseState); } @@ -215,7 +207,6 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { assertEquals(SystemUser.INSTANCE, sendingUser.get()); assertEquals(user, securityContext.getUser()); verify(xPackLicenseState).isAuthAllowed(); - verify(xPackLicenseState).isSecurityEnabled(); verify(securityContext).executeAsUser(any(User.class), any(Consumer.class), eq(Version.CURRENT)); verifyNoMoreInteractions(xPackLicenseState); } @@ -246,7 +237,6 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase { assertEquals("there should always be a user when sending a message for action [indices:foo]", e.getMessage()); assertNull(securityContext.getUser()); verify(xPackLicenseState).isAuthAllowed(); - verify(xPackLicenseState).isSecurityEnabled(); verify(securityContext, never()).executeAsUser(any(User.class), any(Consumer.class), any(Version.class)); verifyNoMoreInteractions(xPackLicenseState); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java index 0ff313ceb25..78825d95ce0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java @@ -53,7 +53,6 @@ public class IPFilterTests extends ESTestCase { public void init() { licenseState = mock(XPackLicenseState.class); when(licenseState.isIpFilteringAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); auditTrail = mock(AuditTrailService.class); clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList( IPFilter.HTTP_FILTER_ALLOW_SETTING, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java index 1b45fad8989..ee40d3e24bb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java @@ -57,7 +57,6 @@ public class IpFilterRemoteAddressFilterTests extends ESTestCase { IPFilter.PROFILE_FILTER_DENY_SETTING))); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isIpFilteringAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); AuditTrailService auditTrailService = new AuditTrailService(settings, Collections.emptyList(), licenseState); IPFilter ipFilter = new IPFilter(settings, auditTrailService, clusterSettings, licenseState); ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java index 1832669fce1..398b783f642 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java @@ -58,7 +58,6 @@ public class NioIPFilterTests extends ESTestCase { IPFilter.PROFILE_FILTER_DENY_SETTING))); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isIpFilteringAllowed()).thenReturn(true); - when(licenseState.isSecurityEnabled()).thenReturn(true); AuditTrailService auditTrailService = new AuditTrailService(settings, Collections.emptyList(), licenseState); IPFilter ipFilter = new IPFilter(settings, auditTrailService, clusterSettings, licenseState); ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java index 5143a7eceb4..dd18363b2a8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java @@ -7,30 +7,28 @@ package org.elasticsearch.xpack.sql.expression; import org.elasticsearch.xpack.sql.tree.Location; +import java.util.List; import java.util.Objects; import static java.util.Collections.emptyList; -import java.util.List; - /** - * {@link Expression}s that can be converted into Elasticsearch - * sorts, aggregations, or queries. They can also be extracted - * from the result of a search. + * {@link Expression}s that can be materialized and represent the result columns sent to the client. + * Typically are converted into constants, functions or Elasticsearch order-bys, + * aggregations, or queries. They can also be extracted from the result of a search. * * In the statement {@code SELECT ABS(foo), A, B+C FROM ...} the three named - * expressions (ABS(foo), A, B+C) get converted to attributes and the user can + * expressions {@code ABS(foo), A, B+C} get converted to attributes and the user can * only see Attributes. * - * In the statement {@code SELECT foo FROM TABLE WHERE foo > 10 + 1} 10+1 is an - * expression. It's not named - meaning there's no alias for it (defined by the - * user) and as such there's no attribute - no column to be returned to the user. - * It's an expression used for filtering so it doesn't appear in the result set - * (derived table). 
"foo" on the other hand is an expression, a named expression - * (it has a name) and also an attribute - it's a column in the result set. + * In the statement {@code SELECT foo FROM TABLE WHERE foo > 10 + 1} both {@code foo} and + * {@code 10 + 1} are named expressions, the first due to the SELECT, the second due to being a function. + * However since {@code 10 + 1} is used for filtering it doesn't appear appear in the result set + * (derived table) and as such it is never translated to an attribute. + * "foo" on the other hand is since it's a column in the result set. * - * Another example {@code SELECT foo FROM ... WHERE bar > 10 +1} "foo" gets - * converted into an Attribute, bar does not. That's because bar is used for + * Another example {@code SELECT foo FROM ... WHERE bar > 10 +1} {@code foo} gets + * converted into an Attribute, bar does not. That's because {@code bar} is used for * filtering alone but it's not part of the projection meaning the user doesn't * need it in the derived table. */ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 8ee34e32a55..1b326e0474f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -82,13 +82,7 @@ public abstract class Expressions { } public static String name(Expression e) { - if (e instanceof NamedExpression) { - return ((NamedExpression) e).name(); - } else if (e instanceof Literal) { - return e.toString(); - } else { - return e.nodeName(); - } + return e instanceof NamedExpression ? ((NamedExpression) e).name() : e.nodeName(); } public static List names(Collection e) { @@ -105,7 +99,7 @@ public abstract class Expressions { return ((NamedExpression) e).toAttribute(); } if (e != null && e.foldable()) { - return new LiteralAttribute(Literal.of(e)); + return Literal.of(e).toAttribute(); } return null; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java index 9a4ffce9295..4badfc7091c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -12,9 +12,16 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypeConversion; import org.elasticsearch.xpack.sql.type.DataTypes; +import java.util.List; import java.util.Objects; -public class Literal extends LeafExpression { +import static java.util.Collections.emptyList; + +/** + * SQL Literal or constant. + */ +public class Literal extends NamedExpression { + public static final Literal TRUE = Literal.of(Location.EMPTY, Boolean.TRUE); public static final Literal FALSE = Literal.of(Location.EMPTY, Boolean.FALSE); @@ -22,7 +29,11 @@ public class Literal extends LeafExpression { private final DataType dataType; public Literal(Location location, Object value, DataType dataType) { - super(location); + this(location, null, value, dataType); + } + + public Literal(Location location, String name, Object value, DataType dataType) { + super(location, name == null ? 
String.valueOf(value) : name, emptyList(), null); this.dataType = dataType; this.value = DataTypeConversion.convert(value, dataType); } @@ -61,10 +72,24 @@ public class Literal extends LeafExpression { return value; } + @Override + public Attribute toAttribute() { + return new LiteralAttribute(location(), name(), null, false, id(), false, dataType, this); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public AttributeSet references() { + return AttributeSet.EMPTY; + } @Override public int hashCode() { - return Objects.hash(value, dataType); + return Objects.hash(name(), value, dataType); } @Override @@ -72,21 +97,25 @@ public class Literal extends LeafExpression { if (this == obj) { return true; } - if (obj == null || getClass() != obj.getClass()) { return false; } Literal other = (Literal) obj; - return Objects.equals(value, other.value) + return Objects.equals(name(), other.name()) + && Objects.equals(value, other.value) && Objects.equals(dataType, other.dataType); } @Override public String toString() { - return Objects.toString(value); + String s = String.valueOf(value); + return name().equals(s) ? s : name() + "=" + value; } + /** + * Utility method for creating 'in-line' Literals (out of values instead of expressions). + */ public static Literal of(Location loc, Object value) { if (value instanceof Literal) { return (Literal) value; @@ -94,15 +123,32 @@ public class Literal extends LeafExpression { return new Literal(loc, value, DataTypes.fromJava(value)); } + /** + * Utility method for creating a literal out of a foldable expression. + * Throws an exception if the expression is not foldable. + */ public static Literal of(Expression foldable) { - if (foldable instanceof Literal) { - return (Literal) foldable; - } + return of((String) null, foldable); + } + public static Literal of(String name, Expression foldable) { if (!foldable.foldable()) { throw new SqlIllegalArgumentException("Foldable expression required for Literal creation; received unfoldable " + foldable); } - return new Literal(foldable.location(), foldable.fold(), foldable.dataType()); + if (foldable instanceof Literal) { + Literal l = (Literal) foldable; + if (name == null || l.name().equals(name)) { + return l; + } + } + + Object fold = foldable.fold(); + + if (name == null) { + name = foldable instanceof NamedExpression ? 
((NamedExpression) foldable).name() : String.valueOf(fold); + } + + return new Literal(foldable.location(), name, fold, foldable.dataType()); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java index ff07731b82e..a6483458a6b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java @@ -15,20 +15,12 @@ public class LiteralAttribute extends TypedAttribute { private final Literal literal; - public LiteralAttribute(Literal literal) { - this(literal.location(), String.valueOf(literal.fold()), null, false, null, false, literal.dataType(), literal); - } - public LiteralAttribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic, DataType dataType, Literal literal) { super(location, name, dataType, qualifier, nullable, id, synthetic); this.literal = literal; } - public Literal literal() { - return literal; - } - @Override protected NodeInfo info() { return NodeInfo.create(this, LiteralAttribute::new, @@ -49,4 +41,4 @@ public class LiteralAttribute extends TypedAttribute { protected String label() { return "c"; } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index c9d652861f8..820aafb0116 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -21,13 +21,16 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.sql.expression.function.aggregate.SumOfSquares; import org.elasticsearch.xpack.sql.expression.function.aggregate.VarPop; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.HourOfDay; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfDay; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfHour; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Quarter; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; @@ -62,21 +65,21 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.Ascii; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BitLength; import 
org.elasticsearch.xpack.sql.expression.function.scalar.string.Char; import org.elasticsearch.xpack.sql.expression.function.scalar.string.CharLength; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.LCase; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.LTrim; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.Length; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.RTrim; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.Space; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Insert; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.LCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Left; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Locate; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Position; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.RTrim; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Repeat; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Replace; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Right; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.Space; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Substring; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -123,6 +126,9 @@ public class FunctionRegistry { def(MonthOfYear.class, MonthOfYear::new, "MONTH"), def(Year.class, Year::new), def(WeekOfYear.class, WeekOfYear::new, "WEEK"), + def(DayName.class, DayName::new, "DAYNAME"), + def(MonthName.class, MonthName::new, "MONTHNAME"), + def(Quarter.class, Quarter::new), // Math def(Abs.class, Abs::new), def(ACos.class, ACos::new), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java index 4d68ad57cf9..ae94b0b9f83 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java @@ -111,4 +111,11 @@ public class Cast extends UnaryScalarFunction { public String toString() { return functionName() + "(" + field().toString() + " AS " + to().sqlName() + ")#" + id(); } -} \ No newline at end of file + + @Override + public String name() { + StringBuilder sb = new StringBuilder(super.name()); + sb.insert(sb.length() - 1, " AS " + to().sqlName()); + return sb.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index 0f36654fa4a..a62aadab467 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -10,6 +10,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor; @@ -17,13 +19,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime. import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.InsertFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.LocateFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor; import java.util.ArrayList; @@ -52,6 +54,8 @@ public final class Processors { entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); // datetime entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); + entries.add(new Entry(Processor.class, NamedDateTimeProcessor.NAME, NamedDateTimeProcessor::new)); + entries.add(new Entry(Processor.class, QuarterProcessor.NAME, QuarterProcessor::new)); // math entries.add(new Entry(Processor.class, MathProcessor.NAME, MathProcessor::new)); // string diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java index 309ee4e8e86..e7b8529557f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; -import 
org.elasticsearch.xpack.sql.expression.LiteralAttribute; import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; @@ -69,11 +68,9 @@ public abstract class ScalarFunction extends Function { if (attr instanceof AggregateFunctionAttribute) { return asScriptFrom((AggregateFunctionAttribute) attr); } - if (attr instanceof LiteralAttribute) { - return asScriptFrom((LiteralAttribute) attr); + if (attr instanceof FieldAttribute) { + return asScriptFrom((FieldAttribute) attr); } - // fall-back to - return asScriptFrom((FieldAttribute) attr); } throw new SqlIllegalArgumentException("Cannot evaluate script for expression {}", exp); } @@ -102,12 +99,6 @@ public abstract class ScalarFunction extends Function { aggregate.dataType()); } - protected ScriptTemplate asScriptFrom(LiteralAttribute literal) { - return new ScriptTemplate(formatScript("{}"), - paramsBuilder().variable(literal.literal()).build(), - literal.dataType()); - } - protected String formatScript(String scriptTemplate) { return formatTemplate(scriptTemplate); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java new file mode 100644 index 00000000000..2213fad8c8d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.TimeZone; + +abstract class BaseDateTimeFunction extends UnaryScalarFunction { + + private final TimeZone timeZone; + private final String name; + + BaseDateTimeFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field); + this.timeZone = timeZone; + + StringBuilder sb = new StringBuilder(super.name()); + // add timezone as last argument + sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]"); + + this.name = sb.toString(); + } + + @Override + protected final NodeInfo<BaseDateTimeFunction> info() { + return NodeInfo.create(this, ctorForInfo(), field(), timeZone()); + } + + protected abstract NodeInfo.NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo(); + + @Override + protected TypeResolution resolveType() { + if (field().dataType() == DataType.DATE) { + return TypeResolution.TYPE_RESOLVED; + } + return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" + + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); + } + + public TimeZone timeZone() { + return timeZone; + } + + @Override + public String name() { + return name; + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { + throw new UnsupportedOperationException(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java new file mode 100644 index 00000000000..95547ded222 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.joda.time.ReadableInstant; + +import java.io.IOException; +import java.util.TimeZone; + +public abstract class BaseDateTimeProcessor implements Processor { + + private final TimeZone timeZone; + + BaseDateTimeProcessor(TimeZone timeZone) { + this.timeZone = timeZone; + } + + BaseDateTimeProcessor(StreamInput in) throws IOException { + timeZone = TimeZone.getTimeZone(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(timeZone.getID()); + } + + TimeZone timeZone() { + return timeZone; + } + + @Override + public Object process(Object l) { + if (l == null) { + return null; + } + long millis; + if (l instanceof String) { + // 6.4+ + millis = Long.parseLong(l.toString()); + } else if (l instanceof ReadableInstant) { + // 6.3- + millis = ((ReadableInstant) l).getMillis(); + } else { + throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); + } + + return doProcess(millis); + } + + abstract Object doProcess(long millis); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java index 60672822278..d87e15084a4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java @@ -6,10 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; -import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; -import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; @@ -17,7 +14,6 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definiti import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; import org.elasticsearch.xpack.sql.tree.Location; -import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; import org.joda.time.DateTime; @@ -31,45 +27,10 @@ import java.util.TimeZone; import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; -public abstract class DateTimeFunction extends UnaryScalarFunction { - - private final TimeZone timeZone; - private final String name; +public 
abstract class DateTimeFunction extends BaseDateTimeFunction { DateTimeFunction(Location location, Expression field, TimeZone timeZone) { - super(location, field); - this.timeZone = timeZone; - - StringBuilder sb = new StringBuilder(super.name()); - // add timezone as last argument - sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]"); - - this.name = sb.toString(); - } - - @Override - protected final NodeInfo info() { - return NodeInfo.create(this, ctorForInfo(), field(), timeZone()); - } - - protected abstract NodeInfo.NodeCtor2 ctorForInfo(); - - @Override - protected TypeResolution resolveType() { - if (field().dataType() == DataType.DATE) { - return TypeResolution.TYPE_RESOLVED; - } - return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" - + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); - } - - public TimeZone timeZone() { - return timeZone; - } - - @Override - public boolean foldable() { - return field().foldable(); + super(location, field, timeZone); } @Override @@ -79,7 +40,7 @@ public abstract class DateTimeFunction extends UnaryScalarFunction { return null; } - return dateTimeChrono(folded.getMillis(), timeZone.getID(), chronoField().name()); + return dateTimeChrono(folded.getMillis(), timeZone().getID(), chronoField().name()); } public static Integer dateTimeChrono(long millis, String tzId, String chronoName) { @@ -94,27 +55,21 @@ public abstract class DateTimeFunction extends UnaryScalarFunction { String template = null; template = formatTemplate("{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})"); params.variable(field.name()) - .variable(timeZone.getID()) + .variable(timeZone().getID()) .variable(chronoField().name()); return new ScriptTemplate(template, params.build(), dataType()); } - - @Override - protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { - throw new UnsupportedOperationException(); - } - /** * Used for generating the painless script version of this function when the time zone is not UTC */ protected abstract ChronoField chronoField(); @Override - protected final ProcessorDefinition makeProcessorDefinition() { + protected ProcessorDefinition makeProcessorDefinition() { return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), - new DateTimeProcessor(extractor(), timeZone)); + new DateTimeProcessor(extractor(), timeZone())); } protected abstract DateTimeExtractor extractor(); @@ -127,12 +82,6 @@ public abstract class DateTimeFunction extends UnaryScalarFunction { // used for applying ranges public abstract String dateTimeFormat(); - // add tz along the rest of the params - @Override - public String name() { - return name; - } - @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -140,11 +89,11 @@ public abstract class DateTimeFunction extends UnaryScalarFunction { } DateTimeFunction other = (DateTimeFunction) obj; return Objects.equals(other.field(), field()) - && Objects.equals(other.timeZone, timeZone); + && Objects.equals(other.timeZone(), timeZone()); } @Override public int hashCode() { - return Objects.hash(field(), timeZone); + return Objects.hash(field(), timeZone()); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index d135b8a0865..d34b1c1e390 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -7,19 +7,16 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; import org.joda.time.DateTime; import org.joda.time.DateTimeFieldType; import org.joda.time.DateTimeZone; import org.joda.time.ReadableDateTime; -import org.joda.time.ReadableInstant; import java.io.IOException; import java.util.Objects; import java.util.TimeZone; -public class DateTimeProcessor implements Processor { +public class DateTimeProcessor extends BaseDateTimeProcessor { public enum DateTimeExtractor { DAY_OF_MONTH(DateTimeFieldType.dayOfMonth()), @@ -45,24 +42,22 @@ public class DateTimeProcessor implements Processor { } public static final String NAME = "dt"; - private final DateTimeExtractor extractor; - private final TimeZone timeZone; public DateTimeProcessor(DateTimeExtractor extractor, TimeZone timeZone) { + super(timeZone); this.extractor = extractor; - this.timeZone = timeZone; } public DateTimeProcessor(StreamInput in) throws IOException { + super(in); extractor = in.readEnum(DateTimeExtractor.class); - timeZone = TimeZone.getTimeZone(in.readString()); } @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); out.writeEnum(extractor); - out.writeString(timeZone.getID()); } @Override @@ -75,32 +70,15 @@ public class DateTimeProcessor implements Processor { } @Override - public Object process(Object l) { - if (l == null) { - return null; - } - - ReadableDateTime dt; - if (l instanceof String) { - // 6.4+ - final long millis = Long.parseLong(l.toString()); - dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone)); - } else if (l instanceof ReadableInstant) { - // 6.3- - dt = (ReadableDateTime) l; - if (!TimeZone.getTimeZone("UTC").equals(timeZone)) { - dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone)); - } - } else { - throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); - } + public Object doProcess(long millis) { + ReadableDateTime dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone())); return extractor.extract(dt); } @Override public int hashCode() { - return Objects.hash(extractor, timeZone); + return Objects.hash(extractor, timeZone()); } @Override @@ -110,7 +88,7 @@ public class DateTimeProcessor implements Processor { } DateTimeProcessor other = (DateTimeProcessor) obj; return Objects.equals(extractor, other.extractor) - && Objects.equals(timeZone, other.timeZone); + && Objects.equals(timeZone(), other.timeZone()); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java new file mode 100644 index 00000000000..2f5ba7eeaca --- /dev/null +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.util.TimeZone; + +/** + * Extract the day of the week from a datetime in text format (Monday, Tuesday etc.) + */ +public class DayName extends NamedDateTimeFunction { + protected static final String DAY_NAME_FORMAT = "EEEE"; + + public DayName(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { + return DayName::new; + } + + @Override + protected DayName replaceChild(Expression newChild) { + return new DayName(location(), newChild, timeZone()); + } + + @Override + protected String dateTimeFormat() { + return DAY_NAME_FORMAT; + } + + @Override + protected NameExtractor nameExtractor() { + return NameExtractor.DAY_NAME; + } + + @Override + public String extractName(long millis, String tzId) { + return nameExtractor().extract(millis, tzId); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java index 1ac3771d49d..ebb576b4648 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java @@ -22,7 +22,7 @@ public class DayOfMonth extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfMonth::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java index 7582ece6250..d840d4d71df 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java @@ -22,7 +22,7 @@ public class DayOfWeek extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfWeek::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java index 8f5e0618832..1fa248d9c20 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java @@ -23,7 +23,7 @@ public class DayOfYear extends 
DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfYear::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java index 5a2bc681ab8..4df28bddad0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java @@ -22,7 +22,7 @@ public class HourOfDay extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return HourOfDay::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java index 2840fa0c21b..ef0fb0bce18 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java @@ -23,7 +23,7 @@ public class MinuteOfDay extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MinuteOfDay::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java index d577bb91696..f5ab095ef24 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java @@ -22,7 +22,7 @@ public class MinuteOfHour extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MinuteOfHour::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java new file mode 100644 index 00000000000..170c80c10f9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.util.TimeZone; + +/** + * Extract the month from a datetime in text format (January, February etc.)
+ */ +public class MonthName extends NamedDateTimeFunction { + protected static final String MONTH_NAME_FORMAT = "MMMM"; + + public MonthName(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { + return MonthName::new; + } + + @Override + protected MonthName replaceChild(Expression newChild) { + return new MonthName(location(), newChild, timeZone()); + } + + @Override + protected String dateTimeFormat() { + return MONTH_NAME_FORMAT; + } + + @Override + public String extractName(long millis, String tzId) { + return nameExtractor().extract(millis, tzId); + } + + @Override + protected NameExtractor nameExtractor() { + return NameExtractor.MONTH_NAME; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java index 3a2d51bee78..503a771611e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java @@ -22,7 +22,7 @@ public class MonthOfYear extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MonthOfYear::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java new file mode 100644 index 00000000000..c3e10981ce1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; +import org.joda.time.DateTime; + +import java.util.Objects; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +/* + * Base class for "naming" date/time functions like month_name and day_name + */ +abstract class NamedDateTimeFunction extends BaseDateTimeFunction { + + NamedDateTimeFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + public Object fold() { + DateTime folded = (DateTime) field().fold(); + if (folded == null) { + return null; + } + + return extractName(folded.getMillis(), timeZone().getID()); + } + + public abstract String extractName(long millis, String tzId); + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + ParamsBuilder params = paramsBuilder(); + + String template = null; + template = formatTemplate(formatMethodName("{sql}.{method_name}(doc[{}].value.millis, {})")); + params.variable(field.name()) + .variable(timeZone().getID()); + + return new ScriptTemplate(template, params.build(), dataType()); + } + + private String formatMethodName(String template) { + // the Painless method name will be the enum's lower camelcase name + return template.replace("{method_name}", StringUtils.underscoreToLowerCamelCase(nameExtractor().toString())); + } + + @Override + protected final ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new NamedDateTimeProcessor(nameExtractor(), timeZone())); + } + + protected abstract NameExtractor nameExtractor(); + + protected abstract String dateTimeFormat(); + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + NamedDateTimeFunction other = (NamedDateTimeFunction) obj; + return Objects.equals(other.field(), field()) + && Objects.equals(other.timeZone(), timeZone()); + } + + @Override + public int hashCode() { + return Objects.hash(field(), timeZone()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java new file mode 
100644 index 00000000000..478ad8ee09f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Locale; +import java.util.Objects; +import java.util.TimeZone; +import java.util.function.BiFunction; + +public class NamedDateTimeProcessor extends BaseDateTimeProcessor { + + public enum NameExtractor { + // for the moment we'll use no specific Locale, but we might consider introducing a Locale parameter, just like the timeZone one + DAY_NAME((Long millis, String tzId) -> { + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); + return time.format(DateTimeFormatter.ofPattern(DayName.DAY_NAME_FORMAT, Locale.ROOT)); + }), + MONTH_NAME((Long millis, String tzId) -> { + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); + return time.format(DateTimeFormatter.ofPattern(MonthName.MONTH_NAME_FORMAT, Locale.ROOT)); + }); + + private final BiFunction<Long, String, String> apply; + + NameExtractor(BiFunction<Long, String, String> apply) { + this.apply = apply; + } + + public final String extract(Long millis, String tzId) { + return apply.apply(millis, tzId); + } + } + + public static final String NAME = "ndt"; + + private final NameExtractor extractor; + + public NamedDateTimeProcessor(NameExtractor extractor, TimeZone timeZone) { + super(timeZone); + this.extractor = extractor; + } + + public NamedDateTimeProcessor(StreamInput in) throws IOException { + super(in); + extractor = in.readEnum(NameExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(extractor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + NameExtractor extractor() { + return extractor; + } + + @Override + public Object doProcess(long millis) { + return extractor.extract(millis, timeZone().getID()); + } + + @Override + public int hashCode() { + return Objects.hash(extractor, timeZone()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + NamedDateTimeProcessor other = (NamedDateTimeProcessor) obj; + return Objects.equals(extractor, other.extractor) + && Objects.equals(timeZone(), other.timeZone()); + } + + @Override + public String toString() { + return extractor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java new file mode 100644 index 00000000000..22e368b0ec6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTime; + +import java.util.Objects; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor.quarter; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +public class Quarter extends BaseDateTimeFunction { + + protected static final String QUARTER_FORMAT = "q"; + + public Quarter(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + public Object fold() { + DateTime folded = (DateTime) field().fold(); + if (folded == null) { + return null; + } + + return quarter(folded.getMillis(), timeZone().getID()); + } + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + ParamsBuilder params = paramsBuilder(); + + String template = null; + template = formatTemplate("{sql}.quarter(doc[{}].value.millis, {})"); + params.variable(field.name()) + .variable(timeZone().getID()); + + return new ScriptTemplate(template, params.build(), dataType()); + } + + @Override + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { + return Quarter::new; + } + + @Override + protected Quarter replaceChild(Expression newChild) { + return new Quarter(location(), newChild, timeZone()); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new QuarterProcessor(timeZone())); + } + + @Override + public DataType dataType() { + return DataType.INTEGER; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + BaseDateTimeFunction other = (BaseDateTimeFunction) obj; + return Objects.equals(other.field(), field()) + && Objects.equals(other.timeZone(), timeZone()); + } + + @Override + public int hashCode() { + return Objects.hash(field(), timeZone()); + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java new file mode 100644 index 00000000000..c6904216d0f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java @@ -0,0
+1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Locale; +import java.util.Objects; +import java.util.TimeZone; + +public class QuarterProcessor extends BaseDateTimeProcessor { + + public QuarterProcessor(TimeZone timeZone) { + super(timeZone); + } + + public QuarterProcessor(StreamInput in) throws IOException { + super(in); + } + + public static final String NAME = "q"; + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object doProcess(long millis) { + return quarter(millis, timeZone().getID()); + } + + public static Integer quarter(long millis, String tzId) { + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); + return Integer.valueOf(time.format(DateTimeFormatter.ofPattern(Quarter.QUARTER_FORMAT, Locale.ROOT))); + } + + @Override + public int hashCode() { + return Objects.hash(timeZone()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + QuarterProcessor other = (QuarterProcessor) obj; + return Objects.equals(timeZone(), other.timeZone()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java index 883502c017d..3522eb10ffe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java @@ -22,7 +22,7 @@ public class SecondOfMinute extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return SecondOfMinute::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java index eef2c48ad0f..59948165f71 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java @@ -22,7 +22,7 @@ public class WeekOfYear extends DateTimeFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return WeekOfYear::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java index 28d475e4c70..2b065329be3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java @@ -22,7 +22,7 @@ public class Year extends DateTimeHistogramFunction { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return Year::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java index 921b6edaef6..a3fdfa654df 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java @@ -21,7 +21,7 @@ public class E extends MathFunction { private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.E", Params.EMPTY, DataType.DOUBLE); public E(Location location) { - super(location, new Literal(location, Math.E, DataType.DOUBLE)); + super(location, new Literal(location, "E", Math.E, DataType.DOUBLE)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java index 9758843ee5d..e57aa333f06 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java @@ -21,7 +21,7 @@ public class Pi extends MathFunction { private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.PI", Params.EMPTY, DataType.DOUBLE); public Pi(Location location) { - super(location, new Literal(location, Math.PI, DataType.DOUBLE)); + super(location, new Literal(location, "PI", Math.PI, DataType.DOUBLE)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java index 9325986ac1f..3834b16ff1e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java @@ -22,7 +22,7 @@ import java.util.Locale; import static java.lang.String.format; import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; -import static org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor.doProcess; +import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor.doProcess; /** * Search the source string for occurrences of the pattern, and replace with the replacement string.
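Note: the E and Pi hunks above exercise the named-Literal constructor introduced earlier in this patch. A minimal sketch of the resulting behavior, using only constructors and methods visible in this diff (the wrapper class NamedLiteralSketch is illustrative, not part of the change):

import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.type.DataType;

class NamedLiteralSketch {
    static Attribute piColumn() {
        // The 4-arg constructor attaches a display name to the constant itself,
        // mirroring what Pi(Location) now does.
        Literal pi = new Literal(Location.EMPTY, "PI", Math.PI, DataType.DOUBLE);
        // Since Literal now extends NamedExpression, toAttribute() yields a
        // LiteralAttribute named "PI"; this is why the folding rule in
        // Optimizer.java below no longer rebuilds a wrapping Alias.
        return pi.toAttribute();
    }
}
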
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 12faeb78b66..f0a79f15e36 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor.BinaryStringNumericOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor.BinaryStringStringOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor; @@ -28,6 +30,18 @@ public final class InternalSqlScriptUtils { return DateTimeFunction.dateTimeChrono(millis, tzId, chronoName); } + public static String dayName(long millis, String tzId) { + return NameExtractor.DAY_NAME.extract(millis, tzId); + } + + public static String monthName(long millis, String tzId) { + return NameExtractor.MONTH_NAME.extract(millis, tzId); + } + + public static Integer quarter(long millis, String tzId) { + return QuarterProcessor.quarter(millis, tzId); + } + public static Integer ascii(String s) { return (Integer) StringOperation.ASCII.apply(s); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 55c4112d38b..72105a2fae8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -1118,36 +1118,12 @@ public class Optimizer extends RuleExecutor { @Override protected Expression rule(Expression e) { - // handle aliases to avoid double aliasing of functions - // alias points to function which gets folded and wrapped in an alias that is - // aliases if (e instanceof Alias) { Alias a = (Alias) e; - Expression fold = fold(a.child()); - if (fold != a.child()) { - return new Alias(a.location(), a.name(), null, fold, a.id()); - } - return a; + return a.child().foldable() ? Literal.of(a.name(), a.child()) : a; } - Expression fold = fold(e); - if (fold != e) { - // preserve the name through an alias - if (e instanceof NamedExpression) { - NamedExpression ne = (NamedExpression) e; - return new Alias(e.location(), ne.name(), null, fold, ne.id()); - } - return fold; - } - return e; - } - - private Expression fold(Expression e) { - // literals are always foldable, so avoid creating a duplicate - if (e.foldable() && !(e instanceof Literal)) { - return new Literal(e.location(), e.fold(), e.dataType()); - } - return e; + return e.foldable() ? 
Literal.of(e) : e; } } @@ -1836,14 +1812,11 @@ public class Optimizer extends RuleExecutor { private List extractConstants(List named) { List values = new ArrayList<>(); for (NamedExpression n : named) { - if (n instanceof Alias) { - Alias a = (Alias) n; - if (a.child().foldable()) { - values.add(a.child().fold()); - } - else { - return values; - } + if (n.foldable()) { + values.add(n.fold()); + } else { + // not everything is foldable, bail-out early + return values; } } return values; diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 8f86685889c..0f12d32d44e 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -9,6 +9,9 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils { Integer dateTimeChrono(long, String, String) + String dayName(long, String) + String monthName(long, String) + Integer quarter(long, String) Integer ascii(String) Integer bitLength(String) String character(Number) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java index 8527c5b62df..d6bd6ab96b2 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java @@ -61,7 +61,7 @@ public class LiteralTests extends AbstractNodeTestCase { @Override protected Literal copy(Literal instance) { - return new Literal(instance.location(), instance.value(), instance.dataType()); + return new Literal(instance.location(), instance.name(), instance.value(), instance.dataType()); } @Override diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java new file mode 100644 index 00000000000..828a16f5aa9 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.TimeZone; + +public class NamedDateTimeProcessorTests extends AbstractWireSerializingTestCase { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + public static NamedDateTimeProcessor randomNamedDateTimeProcessor() { + return new NamedDateTimeProcessor(randomFrom(NameExtractor.values()), UTC); + } + + @Override + protected NamedDateTimeProcessor createTestInstance() { + return randomNamedDateTimeProcessor(); + } + + @Override + protected Reader instanceReader() { + return NamedDateTimeProcessor::new; + } + + @Override + protected NamedDateTimeProcessor mutateInstance(NamedDateTimeProcessor instance) throws IOException { + NameExtractor replaced = randomValueOtherThan(instance.extractor(), () -> randomFrom(NameExtractor.values())); + return new NamedDateTimeProcessor(replaced, UTC); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") + public void testValidDayNamesInUTC() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, UTC); + assertEquals("Thursday", proc.process("0")); + assertEquals("Saturday", proc.process("-64164233612338")); + assertEquals("Monday", proc.process("64164233612338")); + + assertEquals("Thursday", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("Thursday", proc.process(new DateTime(-5400, 12, 25, 2, 0, DateTimeZone.UTC))); + assertEquals("Friday", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") + public void testValidDayNamesWithNonUTCTimeZone() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, TimeZone.getTimeZone("GMT-10:00")); + assertEquals("Wednesday", proc.process("0")); + assertEquals("Friday", proc.process("-64164233612338")); + assertEquals("Monday", proc.process("64164233612338")); + + assertEquals("Wednesday", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("Wednesday", proc.process(new DateTime(-5400, 12, 25, 2, 0, DateTimeZone.UTC))); + assertEquals("Friday", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + assertEquals("Monday", proc.process(new DateTime(10902, 8, 22, 9, 59, DateTimeZone.UTC))); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") + public void testValidMonthNamesInUTC() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, UTC); + assertEquals("January", proc.process("0")); + assertEquals("September", proc.process("-64164233612338")); + assertEquals("April", proc.process("64164233612338")); + + assertEquals("January", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("December", proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC))); + assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("August", 
proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33621") + public void testValidMonthNamesWithNonUTCTimeZone() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, TimeZone.getTimeZone("GMT-3:00")); + assertEquals("December", proc.process("0")); + assertEquals("August", proc.process("-64165813612338")); // GMT: Tuesday, September 1, -0064 2:53:07.662 AM + assertEquals("April", proc.process("64164233612338")); // GMT: Monday, April 14, 4003 2:13:32.338 PM + + assertEquals("December", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("November", proc.process(new DateTime(-5400, 12, 1, 1, 1, DateTimeZone.UTC))); + assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("July", proc.process(new DateTime(10902, 8, 1, 2, 59, DateTimeZone.UTC))); + assertEquals("August", proc.process(new DateTime(10902, 8, 1, 3, 00, DateTimeZone.UTC))); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java new file mode 100644 index 00000000000..7747bb8cae4 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.TimeZone; + +public class QuarterProcessorTests extends ESTestCase { + + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + public void testQuarterWithUTCTimezone() { + QuarterProcessor proc = new QuarterProcessor(UTC); + + assertEquals(1, proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals(4, proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC))); + assertEquals(1, proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals(3, proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + + assertEquals(1, proc.process("0")); + assertEquals(3, proc.process("-64164233612338")); + assertEquals(2, proc.process("64164233612338")); + } + + public void testQuarterWithNonUTCTimezone() { + QuarterProcessor proc = new QuarterProcessor(TimeZone.getTimeZone("GMT-10:00")); + assertEquals(4, proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals(4, proc.process(new DateTime(-5400, 1, 1, 5, 0, DateTimeZone.UTC))); + assertEquals(1, proc.process(new DateTime(30, 4, 1, 9, 59, DateTimeZone.UTC))); + + proc = new QuarterProcessor(TimeZone.getTimeZone("GMT+10:00")); + assertEquals(4, proc.process(new DateTime(10902, 9, 30, 14, 1, DateTimeZone.UTC))); + assertEquals(3, proc.process(new DateTime(10902, 9, 30, 13, 59, DateTimeZone.UTC))); + + assertEquals(1, proc.process("0")); + assertEquals(3, proc.process("-64164233612338")); + assertEquals(2, proc.process("64164233612338")); + } +} diff --git
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index ed4e54701dc..07349008c07 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -85,6 +85,14 @@ public class OptimizerTests extends ESTestCase { private static final Expression DUMMY_EXPRESSION = new DummyBooleanExpression(EMPTY, 0); + private static final Literal ONE = L(1); + private static final Literal TWO = L(2); + private static final Literal THREE = L(3); + private static final Literal FOUR = L(4); + private static final Literal FIVE = L(5); + private static final Literal SIX = L(6); + + public static class DummyBooleanExpression extends Expression { private final int id; @@ -161,7 +169,7 @@ public class OptimizerTests extends ESTestCase { public void testCombineProjections() { // a - Alias a = new Alias(EMPTY, "a", L(5)); + Alias a = new Alias(EMPTY, "a", FIVE); // b Alias b = new Alias(EMPTY, "b", L(10)); // x -> a @@ -187,7 +195,7 @@ public class OptimizerTests extends ESTestCase { // SELECT 5 a, 10 b FROM foo WHERE a < 10 ORDER BY b // a - Alias a = new Alias(EMPTY, "a", L(5)); + Alias a = new Alias(EMPTY, "a", FIVE); // b Alias b = new Alias(EMPTY, "b", L(10)); // WHERE a < 10 @@ -226,49 +234,44 @@ public class OptimizerTests extends ESTestCase { // public void testConstantFolding() { - Expression exp = new Add(EMPTY, L(2), L(3)); + Expression exp = new Add(EMPTY, TWO, THREE); assertTrue(exp.foldable()); assertTrue(exp instanceof NamedExpression); String n = Expressions.name(exp); Expression result = new ConstantFolding().rule(exp); - assertTrue(result instanceof Alias); + assertTrue(result instanceof Literal); assertEquals(n, Expressions.name(result)); - Expression c = ((Alias) result).child(); - assertTrue(c instanceof Literal); - assertEquals(5, ((Literal) c).value()); + assertEquals(5, ((Literal) result).value()); // check now with an alias result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp)); - assertTrue(result instanceof Alias); assertEquals("a", Expressions.name(result)); - c = ((Alias) result).child(); - assertTrue(c instanceof Literal); - assertEquals(5, ((Literal) c).value()); + assertEquals(5, ((Literal) result).value()); } public void testConstantFoldingBinaryComparison() { - assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, L(2), L(3)))); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, L(2), L(3)))); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, L(2), L(3)))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, L(2), L(3)))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, L(2), L(3)))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, TWO, THREE))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, TWO, THREE))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, TWO, THREE))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, TWO, THREE))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, TWO, THREE))); } public void testConstantFoldingBinaryLogic() { - assertEquals(Literal.FALSE, new ConstantFolding().rule(new 
And(EMPTY, new GreaterThan(EMPTY, L(2), L(3)), Literal.TRUE))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, L(2), L(3)), Literal.TRUE))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, TWO, THREE), Literal.TRUE))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, TWO, THREE), Literal.TRUE))); } public void testConstantFoldingRange() { - assertEquals(Literal.TRUE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), true, L(10), false))); - assertEquals(Literal.FALSE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), false, L(10), false))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new Range(EMPTY, FIVE, FIVE, true, L(10), false))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new Range(EMPTY, FIVE, FIVE, false, L(10), false))); } public void testConstantIsNotNull() { assertEquals(Literal.FALSE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(null)))); - assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(5)))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, FIVE))); } public void testConstantNot() { @@ -296,30 +299,24 @@ public class OptimizerTests extends ESTestCase { } public void testArithmeticFolding() { - assertEquals(10, foldFunction(new Add(EMPTY, L(7), L(3)))); - assertEquals(4, foldFunction(new Sub(EMPTY, L(7), L(3)))); - assertEquals(21, foldFunction(new Mul(EMPTY, L(7), L(3)))); - assertEquals(2, foldFunction(new Div(EMPTY, L(7), L(3)))); - assertEquals(1, foldFunction(new Mod(EMPTY, L(7), L(3)))); + assertEquals(10, foldFunction(new Add(EMPTY, L(7), THREE))); + assertEquals(4, foldFunction(new Sub(EMPTY, L(7), THREE))); + assertEquals(21, foldFunction(new Mul(EMPTY, L(7), THREE))); + assertEquals(2, foldFunction(new Div(EMPTY, L(7), THREE))); + assertEquals(1, foldFunction(new Mod(EMPTY, L(7), THREE))); } public void testMathFolding() { assertEquals(7, foldFunction(new Abs(EMPTY, L(7)))); - assertEquals(0d, (double) foldFunction(new ACos(EMPTY, L(1))), 0.01d); - assertEquals(1.57076d, (double) foldFunction(new ASin(EMPTY, L(1))), 0.01d); - assertEquals(0.78539d, (double) foldFunction(new ATan(EMPTY, L(1))), 0.01d); + assertEquals(0d, (double) foldFunction(new ACos(EMPTY, ONE)), 0.01d); + assertEquals(1.57076d, (double) foldFunction(new ASin(EMPTY, ONE)), 0.01d); + assertEquals(0.78539d, (double) foldFunction(new ATan(EMPTY, ONE)), 0.01d); assertEquals(7, foldFunction(new Floor(EMPTY, L(7)))); assertEquals(Math.E, foldFunction(new E(EMPTY))); } private static Object foldFunction(Function f) { - return unwrapAlias(new ConstantFolding().rule(f)); - } - - private static Object unwrapAlias(Expression e) { - Alias a = (Alias) e; - Literal l = (Literal) a.child(); - return l.value(); + return ((Literal) new ConstantFolding().rule(f)).value(); } // @@ -327,21 +324,21 @@ public class OptimizerTests extends ESTestCase { // public void testBinaryComparisonSimplification() { - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, L(5), L(5)))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, L(5), L(5)))); - assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, L(5), L(5)))); + assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, FIVE, FIVE))); + assertEquals(Literal.TRUE, new 
BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, FIVE, FIVE))); + assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, FIVE, FIVE))); - assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, L(5), L(5)))); - assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, L(5), L(5)))); + assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, FIVE, FIVE))); + assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, FIVE, FIVE))); } public void testLiteralsOnTheRight() { Alias a = new Alias(EMPTY, "a", L(10)); - Expression result = new BooleanLiteralsOnTheRight().rule(new Equals(EMPTY, L(5), a)); + Expression result = new BooleanLiteralsOnTheRight().rule(new Equals(EMPTY, FIVE, a)); assertTrue(result instanceof Equals); Equals eq = (Equals) result; assertEquals(a, eq.left()); - assertEquals(L(5), eq.right()); + assertEquals(FIVE, eq.right()); } public void testBoolSimplifyOr() { @@ -390,7 +387,7 @@ public class OptimizerTests extends ESTestCase { public void testFoldExcludingRangeToFalse() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r = new Range(EMPTY, fa, L(6), false, L(5), true); + Range r = new Range(EMPTY, fa, SIX, false, FIVE, true); assertTrue(r.foldable()); assertEquals(Boolean.FALSE, r.fold()); } @@ -399,7 +396,7 @@ public class OptimizerTests extends ESTestCase { public void testFoldExcludingRangeWithDifferentTypesToFalse() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r = new Range(EMPTY, fa, L(6), false, L(5.5d), true); + Range r = new Range(EMPTY, fa, SIX, false, L(5.5d), true); assertTrue(r.foldable()); assertEquals(Boolean.FALSE, r.fold()); } @@ -408,7 +405,7 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsNotComparable() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, SIX); LessThan lt = new LessThan(EMPTY, fa, Literal.FALSE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); @@ -420,71 +417,71 @@ public class OptimizerTests extends ESTestCase { // a <= 6 AND a < 5 -> a < 5 public void testCombineBinaryComparisonsUpper() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6)); - LessThan lt = new LessThan(EMPTY, fa, L(5)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, SIX); + LessThan lt = new LessThan(EMPTY, fa, FIVE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); Expression exp = rule.rule(new And(EMPTY, lte, lt)); assertEquals(LessThan.class, exp.getClass()); LessThan r = (LessThan) exp; - assertEquals(L(5), r.right()); + assertEquals(FIVE, r.right()); } // 6 <= a AND 5 < a -> 6 <= a public void testCombineBinaryComparisonsLower() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(6)); - GreaterThan gt = new GreaterThan(EMPTY, fa, L(5)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, SIX); + GreaterThan gt = new GreaterThan(EMPTY, fa, 
FIVE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); Expression exp = rule.rule(new And(EMPTY, gte, gt)); assertEquals(GreaterThanOrEqual.class, exp.getClass()); GreaterThanOrEqual r = (GreaterThanOrEqual) exp; - assertEquals(L(6), r.right()); + assertEquals(SIX, r.right()); } // 5 <= a AND 5 < a -> 5 < a public void testCombineBinaryComparisonsInclude() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(5)); - GreaterThan gt = new GreaterThan(EMPTY, fa, L(5)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, FIVE); + GreaterThan gt = new GreaterThan(EMPTY, fa, FIVE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); Expression exp = rule.rule(new And(EMPTY, gte, gt)); assertEquals(GreaterThan.class, exp.getClass()); GreaterThan r = (GreaterThan) exp; - assertEquals(L(5), r.right()); + assertEquals(FIVE, r.right()); } // 3 <= a AND 4 < a AND a <= 7 AND a < 6 -> 4 < a < 6 public void testCombineMultipleBinaryComparisons() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3)); - GreaterThan gt = new GreaterThan(EMPTY, fa, L(4)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, THREE); + GreaterThan gt = new GreaterThan(EMPTY, fa, FOUR); LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7)); - LessThan lt = new LessThan(EMPTY, fa, L(6)); + LessThan lt = new LessThan(EMPTY, fa, SIX); CombineBinaryComparisons rule = new CombineBinaryComparisons(); Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, gt, new And(EMPTY, lt, lte)))); assertEquals(Range.class, exp.getClass()); Range r = (Range) exp; - assertEquals(L(4), r.lower()); + assertEquals(FOUR, r.lower()); assertFalse(r.includeLower()); - assertEquals(L(6), r.upper()); + assertEquals(SIX, r.upper()); assertFalse(r.includeUpper()); } // 3 <= a AND TRUE AND 4 < a AND a != 5 AND a <= 7 -> 4 < a <= 7 AND a != 5 AND TRUE public void testCombineMixedMultipleBinaryComparisons() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3)); - GreaterThan gt = new GreaterThan(EMPTY, fa, L(4)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, THREE); + GreaterThan gt = new GreaterThan(EMPTY, fa, FOUR); LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7)); - Expression ne = new Not(EMPTY, new Equals(EMPTY, fa, L(5))); + Expression ne = new Not(EMPTY, new Equals(EMPTY, fa, FIVE)); CombineBinaryComparisons rule = new CombineBinaryComparisons(); @@ -494,7 +491,7 @@ public class OptimizerTests extends ESTestCase { And and = ((And) exp); assertEquals(Range.class, and.right().getClass()); Range r = (Range) and.right(); - assertEquals(L(4), r.lower()); + assertEquals(FOUR, r.lower()); assertFalse(r.includeLower()); assertEquals(L(7), r.upper()); assertTrue(r.includeUpper()); @@ -503,17 +500,17 @@ public class OptimizerTests extends ESTestCase { // 1 <= a AND a < 5 -> 1 <= a < 5 public void testCombineComparisonsIntoRange() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1)); - LessThan lt = new LessThan(EMPTY, fa, L(5)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, ONE); + LessThan lt 
= new LessThan(EMPTY, fa, FIVE); CombineBinaryComparisons rule = new CombineBinaryComparisons(); Expression exp = rule.rule(new And(EMPTY, gte, lt)); assertEquals(Range.class, rule.rule(exp).getClass()); Range r = (Range) exp; - assertEquals(L(1), r.lower()); + assertEquals(ONE, r.lower()); assertTrue(r.includeLower()); - assertEquals(L(5), r.upper()); + assertEquals(FIVE, r.upper()); assertFalse(r.includeUpper()); } @@ -521,10 +518,10 @@ public class OptimizerTests extends ESTestCase { public void testCombineUnbalancedComparisonsMixedWithEqualsIntoRange() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); IsNotNull isn = new IsNotNull(EMPTY, fa); - GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, ONE); Equals eq = new Equals(EMPTY, fa, L(10)); - LessThan lt = new LessThan(EMPTY, fa, L(5)); + LessThan lt = new LessThan(EMPTY, fa, FIVE); And and = new And(EMPTY, new And(EMPTY, isn, gte), new And(EMPTY, lt, eq)); @@ -535,9 +532,9 @@ public class OptimizerTests extends ESTestCase { assertEquals(Range.class, a.right().getClass()); Range r = (Range) a.right(); - assertEquals(L(1), r.lower()); + assertEquals(ONE, r.lower()); assertTrue(r.includeLower()); - assertEquals(L(5), r.upper()); + assertEquals(FIVE, r.upper()); assertFalse(r.includeUpper()); } @@ -545,8 +542,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsConjunctionOfIncludedRange() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, ONE, false, FOUR, false); And and = new And(EMPTY, r1, r2); @@ -559,8 +556,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsConjunctionOfNonOverlappingBoundaries() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, ONE, false, TWO, false); And and = new And(EMPTY, r1, r2); @@ -568,9 +565,9 @@ public class OptimizerTests extends ESTestCase { Expression exp = rule.rule(and); assertEquals(Range.class, exp.getClass()); Range r = (Range) exp; - assertEquals(L(2), r.lower()); + assertEquals(TWO, r.lower()); assertFalse(r.includeLower()); - assertEquals(L(2), r.upper()); + assertEquals(TWO, r.upper()); assertFalse(r.includeUpper()); assertEquals(Boolean.FALSE, r.fold()); } @@ -579,8 +576,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsConjunctionOfUpperEqualsOverlappingBoundaries() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true); And and = new And(EMPTY, r1, r2); @@ -593,8 +590,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsConjunctionOverlappingUpperBoundary() { 
FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false); And and = new And(EMPTY, r1, r2); @@ -607,8 +604,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsConjunctionWithDifferentUpperLimitInclusion() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true); And and = new And(EMPTY, r1, r2); @@ -616,9 +613,9 @@ public class OptimizerTests extends ESTestCase { Expression exp = rule.rule(and); assertEquals(Range.class, exp.getClass()); Range r = (Range) exp; - assertEquals(L(2), r.lower()); + assertEquals(TWO, r.lower()); assertFalse(r.includeLower()); - assertEquals(L(3), r.upper()); + assertEquals(THREE, r.upper()); assertFalse(r.includeUpper()); } @@ -626,8 +623,8 @@ public class OptimizerTests extends ESTestCase { public void testRangesOverlappingConjunctionNoLowerBoundary() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true); - Range r2 = new Range(EMPTY, fa, L(0), true, L(2), false); + Range r1 = new Range(EMPTY, fa, L(0), false, ONE, true); + Range r2 = new Range(EMPTY, fa, L(0), true, TWO, false); And and = new And(EMPTY, r1, r2); @@ -641,7 +638,7 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsDisjunctionNotComparable() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); + GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE); GreaterThan gt2 = new GreaterThan(EMPTY, fa, Literal.FALSE); Or or = new Or(EMPTY, gt1, gt2); @@ -656,9 +653,9 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsDisjunctionLowerBound() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); - GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2)); - GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3)); + GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE); + GreaterThan gt2 = new GreaterThan(EMPTY, fa, TWO); + GreaterThan gt3 = new GreaterThan(EMPTY, fa, THREE); Or or = new Or(EMPTY, gt1, new Or(EMPTY, gt2, gt3)); @@ -667,16 +664,16 @@ public class OptimizerTests extends ESTestCase { assertEquals(GreaterThan.class, exp.getClass()); GreaterThan gt = (GreaterThan) exp; - assertEquals(L(1), gt.right()); + assertEquals(ONE, gt.right()); } // 2 < a OR 1 < a OR 3 <= a -> 1 < a public void testCombineBinaryComparisonsDisjunctionIncludeLowerBounds() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); - GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2)); - GreaterThanOrEqual gte3 = new GreaterThanOrEqual(EMPTY, fa, L(3)); + GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE); + GreaterThan 
gt2 = new GreaterThan(EMPTY, fa, TWO); + GreaterThanOrEqual gte3 = new GreaterThanOrEqual(EMPTY, fa, THREE); Or or = new Or(EMPTY, new Or(EMPTY, gt1, gt2), gte3); @@ -685,16 +682,16 @@ public class OptimizerTests extends ESTestCase { assertEquals(GreaterThan.class, exp.getClass()); GreaterThan gt = (GreaterThan) exp; - assertEquals(L(1), gt.right()); + assertEquals(ONE, gt.right()); } // a < 1 OR a < 2 OR a < 3 -> a < 3 public void testCombineBinaryComparisonsDisjunctionUpperBound() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - LessThan lt1 = new LessThan(EMPTY, fa, L(1)); - LessThan lt2 = new LessThan(EMPTY, fa, L(2)); - LessThan lt3 = new LessThan(EMPTY, fa, L(3)); + LessThan lt1 = new LessThan(EMPTY, fa, ONE); + LessThan lt2 = new LessThan(EMPTY, fa, TWO); + LessThan lt3 = new LessThan(EMPTY, fa, THREE); Or or = new Or(EMPTY, new Or(EMPTY, lt1, lt2), lt3); @@ -703,16 +700,16 @@ public class OptimizerTests extends ESTestCase { assertEquals(LessThan.class, exp.getClass()); LessThan lt = (LessThan) exp; - assertEquals(L(3), lt.right()); + assertEquals(THREE, lt.right()); } // a < 2 OR a <= 2 OR a < 1 -> a <= 2 public void testCombineBinaryComparisonsDisjunctionIncludeUpperBounds() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - LessThan lt1 = new LessThan(EMPTY, fa, L(1)); - LessThan lt2 = new LessThan(EMPTY, fa, L(2)); - LessThanOrEqual lte2 = new LessThanOrEqual(EMPTY, fa, L(2)); + LessThan lt1 = new LessThan(EMPTY, fa, ONE); + LessThan lt2 = new LessThan(EMPTY, fa, TWO); + LessThanOrEqual lte2 = new LessThanOrEqual(EMPTY, fa, TWO); Or or = new Or(EMPTY, lt2, new Or(EMPTY, lte2, lt1)); @@ -721,18 +718,18 @@ public class OptimizerTests extends ESTestCase { assertEquals(LessThanOrEqual.class, exp.getClass()); LessThanOrEqual lte = (LessThanOrEqual) exp; - assertEquals(L(2), lte.right()); + assertEquals(TWO, lte.right()); } // a < 2 OR 3 < a OR a < 1 OR 4 < a -> a < 2 OR 3 < a public void testCombineBinaryComparisonsDisjunctionOfLowerAndUpperBounds() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - LessThan lt1 = new LessThan(EMPTY, fa, L(1)); - LessThan lt2 = new LessThan(EMPTY, fa, L(2)); + LessThan lt1 = new LessThan(EMPTY, fa, ONE); + LessThan lt2 = new LessThan(EMPTY, fa, TWO); - GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3)); - GreaterThan gt4 = new GreaterThan(EMPTY, fa, L(4)); + GreaterThan gt3 = new GreaterThan(EMPTY, fa, THREE); + GreaterThan gt4 = new GreaterThan(EMPTY, fa, FOUR); Or or = new Or(EMPTY, new Or(EMPTY, lt2, gt3), new Or(EMPTY, lt1, gt4)); @@ -744,18 +741,18 @@ public class OptimizerTests extends ESTestCase { assertEquals(LessThan.class, ro.left().getClass()); LessThan lt = (LessThan) ro.left(); - assertEquals(L(2), lt.right()); + assertEquals(TWO, lt.right()); assertEquals(GreaterThan.class, ro.right().getClass()); GreaterThan gt = (GreaterThan) ro.right(); - assertEquals(L(3), gt.right()); + assertEquals(THREE, gt.right()); } // (2 < a < 3) OR (1 < a < 4) -> (1 < a < 4) public void testCombineBinaryComparisonsDisjunctionOfIncludedRangeNotComparable() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(1), false, Literal.FALSE, false); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, 
fa, ONE, false, Literal.FALSE, false); Or or = new Or(EMPTY, r1, r2); @@ -769,8 +766,9 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsDisjunctionOfIncludedRange() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false); + + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, ONE, false, FOUR, false); Or or = new Or(EMPTY, r1, r2); @@ -779,9 +777,9 @@ public class OptimizerTests extends ESTestCase { assertEquals(Range.class, exp.getClass()); Range r = (Range) exp; - assertEquals(L(1), r.lower()); + assertEquals(ONE, r.lower()); assertFalse(r.includeLower()); - assertEquals(L(4), r.upper()); + assertEquals(FOUR, r.upper()); assertFalse(r.includeUpper()); } @@ -789,8 +787,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsDisjunctionOfNonOverlappingBoundaries() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, ONE, false, TWO, false); Or or = new Or(EMPTY, r1, r2); @@ -803,8 +801,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsDisjunctionOfUpperEqualsOverlappingBoundaries() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true); Or or = new Or(EMPTY, r1, r2); @@ -817,8 +815,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsOverlappingUpperBoundary() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false); - Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, false); + Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false); Or or = new Or(EMPTY, r1, r2); @@ -831,8 +829,8 @@ public class OptimizerTests extends ESTestCase { public void testCombineBinaryComparisonsWithDifferentUpperLimitInclusion() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); - Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false); + Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true); Or or = new Or(EMPTY, r1, r2); @@ -845,8 +843,8 @@ public class OptimizerTests extends ESTestCase { public void testRangesOverlappingNoLowerBoundary() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Range r2 = new Range(EMPTY, fa, L(0), false, L(2), false); - Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true); + Range r2 = new Range(EMPTY, fa, L(0), false, TWO, false); + Range r1 = new Range(EMPTY, fa, L(0), false, ONE, true); Or or = new Or(EMPTY, r1, r2); @@ -860,8 
+858,8 @@ public class OptimizerTests extends ESTestCase { // a == 1 AND a == 2 -> FALSE public void testDualEqualsConjunction() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Equals eq1 = new Equals(EMPTY, fa, L(1)); - Equals eq2 = new Equals(EMPTY, fa, L(2)); + Equals eq1 = new Equals(EMPTY, fa, ONE); + Equals eq2 = new Equals(EMPTY, fa, TWO); PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); @@ -871,8 +869,8 @@ public class OptimizerTests extends ESTestCase { // 1 <= a < 10 AND a == 1 -> a == 1 public void testEliminateRangeByEqualsInInterval() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); - Equals eq1 = new Equals(EMPTY, fa, L(1)); - Range r = new Range(EMPTY, fa, L(1), true, L(10), false); + Equals eq1 = new Equals(EMPTY, fa, ONE); + Range r = new Range(EMPTY, fa, ONE, true, L(10), false); PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, r)); @@ -883,7 +881,7 @@ public class OptimizerTests extends ESTestCase { public void testEliminateRangeByEqualsOutsideInterval() { FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); Equals eq1 = new Equals(EMPTY, fa, L(10)); - Range r = new Range(EMPTY, fa, L(1), false, L(10), false); + Range r = new Range(EMPTY, fa, ONE, false, L(10), false); PropagateEquals rule = new PropagateEquals(); Expression exp = rule.rule(new And(EMPTY, eq1, r)); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/snapshot/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/snapshot/10_basic.yml new file mode 100644 index 00000000000..c0f161472b7 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/snapshot/10_basic.yml @@ -0,0 +1,84 @@ +--- +setup: + + - do: + snapshot.create_repository: + repository: test_repo_restore_1 + body: + type: source + settings: + delegate_type: fs + location: "test_repo_restore_1_loc" + + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + +--- +"Create a source only snapshot and then restore it": + + - do: + index: + index: test_index + type: _doc + id: 1 + body: { foo: bar } + - do: + indices.flush: + index: test_index + + - do: + snapshot.create: + repository: test_repo_restore_1 + snapshot: test_snapshot + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + - is_true: snapshot.version + - gt: { snapshot.version_id: 0} + + - do: + indices.close: + index : test_index + + - do: + snapshot.restore: + repository: test_repo_restore_1 + snapshot: test_snapshot + wait_for_completion: true + + - do: + indices.recovery: + index: test_index + + - match: { test_index.shards.0.type: SNAPSHOT } + - match: { test_index.shards.0.stage: DONE } + - match: { test_index.shards.0.translog.recovered: 0} + - match: { test_index.shards.0.translog.total: 0} + - match: { test_index.shards.0.translog.total_on_start: 0} + - match: { test_index.shards.0.index.files.recovered: 5} + - match: { test_index.shards.0.index.files.reused: 0} + - match: { test_index.shards.0.index.size.reused_in_bytes: 0} + - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0} + + - do: + search: + index: 
test_index + body: + query: + match_all: {} + + - match: {hits.total: 1 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "1" } diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index ab8f9172b69..c0fb7eb2b77 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -155,9 +155,6 @@ subprojects { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' - // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691 - setting 'logger.level', 'DEBUG' - setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' setting 'xpack.ssl.keystore.path', 'testnode.jks' @@ -185,6 +182,7 @@ subprojects { systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } @@ -201,9 +199,6 @@ subprojects { setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' waitCondition = waitWithAuth - // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691 - setting 'logger.level', 'DEBUG' - // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' setting 'xpack.security.enabled', 'true' @@ -224,6 +219,7 @@ subprojects { systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 7c4eda37d2f..8a6944fb870 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; @@ -18,6 +17,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -54,35 +54,13 @@ import static org.hamcrest.Matchers.not; import static 
org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { @Before public void waitForMlTemplates() throws Exception { XPackRestTestHelper.waitForMlTemplates(client()); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -103,7 +81,7 @@ public class FullClusterRestartIT extends ESRestTestCase { String docLocation = "/testsingledoc/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.addParameter("refresh", "true"); createDoc.setJsonEntity(doc); @@ -115,7 +93,7 @@ public class FullClusterRestartIT extends ESRestTestCase { @SuppressWarnings("unchecked") public void testSecurityNativeRealm() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { createUser("preupgrade_user"); createRole("preupgrade_role"); } else { @@ -165,15 +143,15 @@ public class FullClusterRestartIT extends ESRestTestCase { assertUserInfo("preupgrade_user"); assertRoleInfo("preupgrade_role"); - if (!runningAgainstOldCluster) { + if (isRunningAgainstOldCluster() == false) { assertUserInfo("postupgrade_user"); assertRoleInfo("postupgrade_role"); } } public void testWatcher() throws Exception { - if (runningAgainstOldCluster) { - logger.info("Adding a watch on old cluster {}", oldClusterVersion); + if (isRunningAgainstOldCluster()) { + logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); Request createBwcWatch = new Request("PUT", "_xpack/watcher/watch/bwc_watch"); createBwcWatch.setJsonEntity(loadWatch("simple-watch.json")); client().performRequest(createBwcWatch); @@ -194,7 +172,7 @@ public class FullClusterRestartIT extends ESRestTestCase { waitForHits(".watcher-history*", 2); logger.info("Done creating watcher-related indices"); } else { - logger.info("testing against {}", oldClusterVersion); + logger.info("testing against {}", getOldClusterVersion()); waitForYellow(".watches,bwc_watch_index,.watcher-history*"); logger.info("checking if the upgrade procedure on the new cluster is required"); @@ -264,8 +242,8 @@ public class FullClusterRestartIT extends ESRestTestCase { * Tests that a RollUp job created on a old cluster is correctly restarted after the upgrade. 
*/ public void testRollupAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + if (isRunningAgainstOldCluster()) { final int numDocs = 59; final int year = randomIntBetween(1970, 2018); @@ -315,7 +293,7 @@ public class FullClusterRestartIT extends ESRestTestCase { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); } Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); @@ -326,9 +304,9 @@ public class FullClusterRestartIT extends ESRestTestCase { } public void testRollupIDSchemeAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); + if (isRunningAgainstOldCluster()) { final Request indexRequest = new Request("POST", "/id-test-rollup/_doc/1"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); @@ -439,8 +417,8 @@ public class FullClusterRestartIT extends ESRestTestCase { public void testSqlFailsOnIndexWithTwoTypes() throws IOException { // TODO this isn't going to trigger until we backport to 6.1 assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", - oldClusterVersion.before(Version.V_6_0_0_alpha1)); - if (runningAgainstOldCluster) { + getOldClusterVersion().before(Version.V_6_0_0_alpha1)); + if (isRunningAgainstOldCluster()) { Request doc1 = new Request("POST", "/testsqlfailsonindexwithtwotypes/type1"); doc1.setJsonEntity("{}"); client().performRequest(doc1); @@ -550,7 +528,7 @@ public class FullClusterRestartIT extends ESRestTestCase { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { request.addParameter("wait_for_no_initializing_shards", "true"); } Map response = entityAsMap(client().performRequest(request)); @@ -668,7 +646,7 @@ public class FullClusterRestartIT extends ESRestTestCase { // Persistent task state field has been renamed in 6.4.0 from "status" to "state" final String stateFieldName - = (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_4_0)) ? "status" : "state"; + = (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_4_0)) ? "status" : "state"; final String jobStateField = "task.xpack/rollup/job." 
+ stateFieldName + ".job_state"; assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"), diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java new file mode 100644 index 00000000000..a679604a546 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class FullClusterRestartSettingsUpgradeIT extends org.elasticsearch.upgrades.FullClusterRestartSettingsUpgradeIT { + + @Override + protected Settings restClientSettings() { + final String token = + "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java index f5b9381c54b..601dca8abd4 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java @@ -65,6 +65,8 @@ public abstract class ShowTestCase extends CliIntegrationTestCase { assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*")); assertThat(readLine(), RegexMatcher.matches("\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*")); assertThat(readLine(), RegexMatcher.matches("\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_NAME\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAYNAME\\s*\\|\\s*SCALAR\\s*")); assertEquals("", readLine()); } } diff --git a/x-pack/qa/sql/src/main/resources/agg.csv-spec b/x-pack/qa/sql/src/main/resources/agg.csv-spec index 1d9592d963d..d274e5379c9 100644 --- a/x-pack/qa/sql/src/main/resources/agg.csv-spec +++ b/x-pack/qa/sql/src/main/resources/agg.csv-spec @@ -74,6 +74,30 @@ SELECT SUM(salary) FROM test_emp; 4824855 ; +aggregateWithCastPruned +SELECT CAST(SUM(salary) AS INTEGER) FROM test_emp; + + SUM(salary) +------------- +4824855 +; + +aggregateWithUpCast +SELECT CAST(SUM(salary) AS DOUBLE) FROM test_emp; + + CAST(SUM(salary) AS DOUBLE) +----------------------------- +4824855.0 +; + +aggregateWithCastNumericToString +SELECT CAST(AVG(salary) AS VARCHAR) FROM test_emp; + + CAST(AVG(salary) AS VARCHAR):s +-------------------------------- +48248.55 +; + kurtosisAndSkewnessNoGroup SELECT KURTOSIS(emp_no) k, SKEWNESS(salary) s FROM test_emp; diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec index a86b8b65eef..e2213caa597 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -90,6 +90,10 @@ 
aggCountImplicit SELECT COUNT(*) AS count FROM test_emp; aggCountImplicitWithCast SELECT CAST(COUNT(*) AS INT) c FROM "test_emp"; +aggCountImplicitWithUpCast +SELECT CAST(COUNT(*) AS DOUBLE) c FROM "test_emp"; +aggCountImplicitWithPrunedCast +SELECT CAST(COUNT(*) AS BIGINT) c FROM "test_emp"; aggCountImplicitWithConstant SELECT COUNT(1) FROM "test_emp"; aggCountImplicitWithConstantAndFilter @@ -184,6 +188,10 @@ SELECT MIN(emp_no) AS min FROM test_emp; // end::min aggMinImplicitWithCast SELECT CAST(MIN(emp_no) AS SMALLINT) m FROM "test_emp"; +aggMinImplicitWithUpCast +SELECT CAST(MIN(emp_no) AS DOUBLE) m FROM "test_emp"; +aggMinImplicitWithPrunedCast +SELECT CAST(MIN(emp_no) AS INTEGER) m FROM "test_emp"; aggMin SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY gender ORDER BY gender; aggMinWithCast @@ -236,6 +244,10 @@ aggMaxImplicit SELECT MAX(salary) AS max FROM test_emp; aggMaxImplicitWithCast SELECT CAST(MAX(emp_no) AS SMALLINT) c FROM "test_emp"; +aggMaxImplicitWithUpCast +SELECT CAST(MAX(emp_no) AS DOUBLE) c FROM "test_emp"; +aggMaxImplicitWithPrunedCast +SELECT CAST(MAX(emp_no) AS INTEGER) c FROM "test_emp"; aggMax SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY gender ORDER BY gender; aggMaxWithCast @@ -268,6 +280,10 @@ SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND MAX( // SUM aggSumImplicitWithCast SELECT CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp"; +aggSumImplicitWithUpCast +SELECT CAST(SUM(emp_no) AS DOUBLE) s FROM "test_emp"; +aggSumImplicitWithPrunedCast +SELECT CAST(SUM(emp_no) AS INTEGER) s FROM "test_emp"; aggSumWithCast SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp" GROUP BY gender ORDER BY gender; aggSumWithCastAndCount @@ -298,6 +314,8 @@ SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s // AVG aggAvgImplicitWithCast SELECT CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp"; +aggAvgImplicitWithUpCast +SELECT CAST(AVG(emp_no) AS DOUBLE) a FROM "test_emp"; aggAvgWithCastToFloat SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY gender ORDER BY gender; // casting to an exact type - varchar, bigint, etc...
diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec
index 77d397fa2b5..28aadeded2c 100644
--- a/x-pack/qa/sql/src/main/resources/command.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/command.csv-spec
@@ -38,6 +38,11 @@ MONTH |SCALAR
 YEAR |SCALAR
 WEEK_OF_YEAR |SCALAR
 WEEK |SCALAR
+DAY_NAME |SCALAR
+DAYNAME |SCALAR
+MONTH_NAME |SCALAR
+MONTHNAME |SCALAR
+QUARTER |SCALAR
 ABS |SCALAR
 ACOS |SCALAR
 ASIN |SCALAR
@@ -130,6 +135,8 @@ DAY_OF_WEEK |SCALAR
 DAY_OF_YEAR |SCALAR
 HOUR_OF_DAY |SCALAR
 MINUTE_OF_DAY |SCALAR
+DAY_NAME |SCALAR
+DAYNAME |SCALAR
 ;
 
 showTables
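// Illustrative sketch, not part of the patch: the second listing above is the output of a
// SHOW FUNCTIONS command filtered by a SQL LIKE pattern, so the new aliases appear wherever
// the pattern matches (e.g. '%DAY%' picks up both DAY_NAME and DAYNAME). A narrower
// hypothetical probe in the same style:
//
// SHOW FUNCTIONS LIKE 'DAYNAME';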
diff --git a/x-pack/qa/sql/src/main/resources/datetime.sql-spec b/x-pack/qa/sql/src/main/resources/datetime.sql-spec
index 20ea8329c8f..81012b7bebf 100644
--- a/x-pack/qa/sql/src/main/resources/datetime.sql-spec
+++ b/x-pack/qa/sql/src/main/resources/datetime.sql-spec
@@ -12,34 +12,83 @@ dateTimeDay
 SELECT DAY(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
+
 dateTimeDayOfMonth
 SELECT DAY_OF_MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
+
 dateTimeMonth
 SELECT MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
+
 dateTimeYear
 SELECT YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
 
+monthNameFromStringDate
+SELECT MONTHNAME(CAST('2018-09-03' AS TIMESTAMP)) month FROM "test_emp" limit 1;
+
+dayNameFromStringDate
+SELECT DAYNAME(CAST('2018-09-03' AS TIMESTAMP)) day FROM "test_emp" limit 1;
+
+quarterSelect
+SELECT QUARTER(hire_date) q, hire_date FROM test_emp ORDER BY hire_date LIMIT 15;
+
 //
 // Filter
 //
+
 dateTimeFilterDayOfMonth
 SELECT DAY_OF_MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE DAY_OF_MONTH(birth_date) <= 10 ORDER BY emp_no LIMIT 5;
+
 dateTimeFilterMonth
 SELECT MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE MONTH(birth_date) <= 5 ORDER BY emp_no LIMIT 5;
+
 dateTimeFilterYear
 SELECT YEAR(birth_date) AS d, last_name l FROM "test_emp" WHERE YEAR(birth_date) <= 1960 ORDER BY emp_no LIMIT 5;
 
+monthNameFilterWithFirstLetter
+SELECT MONTHNAME(hire_date) AS m, hire_date FROM "test_emp" WHERE LEFT(MONTHNAME(hire_date), 1) = 'J' ORDER BY hire_date LIMIT 10;
+
+monthNameFilterWithFullName
+SELECT MONTHNAME(hire_date) AS m, hire_date FROM "test_emp" WHERE MONTHNAME(hire_date) = 'August' ORDER BY hire_date LIMIT 10;
+
+dayNameFilterWithFullName
+SELECT DAYNAME(hire_date) AS d, hire_date FROM "test_emp" WHERE DAYNAME(hire_date) = 'Sunday' ORDER BY hire_date LIMIT 10;
+
+dayNameAndMonthNameAsFilter
+SELECT first_name, last_name FROM "test_emp" WHERE DAYNAME(hire_date) = 'Sunday' AND MONTHNAME(hire_date) = 'January' ORDER BY hire_date LIMIT 10;
+
+quarterWithFilter
+SELECT QUARTER(hire_date) quarter, hire_date FROM test_emp WHERE QUARTER(hire_date) > 2 ORDER BY hire_date LIMIT 15;
 
 //
 // Aggregate
 //
-
 dateTimeAggByYear
 SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13;
 
-dateTimeAggByMonth
+dateTimeAggByMonthWithOrderBy
 SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY MONTH(birth_date) ORDER BY MONTH(birth_date) DESC;
 
-dateTimeAggByDayOfMonth
+dateTimeAggByDayOfMonthWithOrderBy
 SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC;
+
+monthNameWithGroupBy
+SELECT MONTHNAME("hire_date") AS month, COUNT(*) AS count FROM "test_emp" GROUP BY MONTHNAME("hire_date"), MONTH("hire_date") ORDER BY MONTH("hire_date");
+
+monthNameWithDoubleGroupByAndOrderBy
+SELECT MONTHNAME("hire_date") AS month, COUNT(*) AS count FROM "test_emp" GROUP BY MONTHNAME("hire_date"), MONTH("hire_date") ORDER BY MONTHNAME("hire_date") DESC;
+
+// AwaitsFix https://github.com/elastic/elasticsearch/issues/33519
+// monthNameWithGroupByOrderByAndHaving
+// SELECT CAST(MAX("salary") AS DOUBLE) max_salary, MONTHNAME("hire_date") month_name FROM "test_emp" GROUP BY MONTHNAME("hire_date") HAVING MAX("salary") > 50000 ORDER BY MONTHNAME(hire_date);
+// dayNameWithHaving
+// SELECT DAYNAME("hire_date") FROM "test_emp" GROUP BY DAYNAME("hire_date") HAVING MAX("emp_no") > ASCII(DAYNAME("hire_date"));
+
+dayNameWithDoubleGroupByAndOrderBy
+SELECT COUNT(*) c, DAYNAME(hire_date) day_name, DAY(hire_date) day FROM test_emp WHERE MONTHNAME(hire_date) = 'August' GROUP BY DAYNAME(hire_date), DAY(hire_date) ORDER BY DAYNAME(hire_date), DAY(hire_date);
+
+dayNameWithGroupByOrderByAndHaving
+SELECT CAST(MAX(salary) AS DOUBLE) max_salary, DAYNAME(hire_date) day_name FROM test_emp GROUP BY DAYNAME(hire_date) HAVING MAX(salary) > 50000 ORDER BY DAYNAME("hire_date");
+
+quarterWithGroupByAndOrderBy
+SELECT QUARTER(hire_date) quarter, COUNT(*) hires FROM test_emp GROUP BY QUARTER(hire_date) ORDER BY QUARTER(hire_date);
\ No newline at end of file
diff --git a/x-pack/qa/sql/src/main/resources/docs.csv-spec b/x-pack/qa/sql/src/main/resources/docs.csv-spec
index 2a4f29fcf5d..52356bdfd52 100644
--- a/x-pack/qa/sql/src/main/resources/docs.csv-spec
+++ b/x-pack/qa/sql/src/main/resources/docs.csv-spec
@@ -214,6 +214,11 @@ MONTH |SCALAR
 YEAR |SCALAR
 WEEK_OF_YEAR |SCALAR
 WEEK |SCALAR
+DAY_NAME |SCALAR
+DAYNAME |SCALAR
+MONTH_NAME |SCALAR
+MONTHNAME |SCALAR
+QUARTER |SCALAR
 ABS |SCALAR
 ACOS |SCALAR
 ASIN |SCALAR
@@ -318,7 +323,9 @@ DAY |SCALAR
 DAY_OF_WEEK |SCALAR
 DAY_OF_YEAR |SCALAR
 HOUR_OF_DAY |SCALAR
-MINUTE_OF_DAY |SCALAR
+MINUTE_OF_DAY  |SCALAR
+DAY_NAME |SCALAR
+DAYNAME |SCALAR
 // end::showFunctionsWithPattern
 ;
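// Illustrative sketch, not part of the patch: the *Inline* cases that follow evaluate
// expressions built only from literals, so no row data is involved and the FROM clause can
// be dropped entirely; both engines must agree on the single folded value. A hypothetical
// combined case (name assumed, using only functions the change already exercises):
//
// concatWithSpaceInline
// SELECT CONCAT(CONCAT('Elastic', SPACE(1)), 'search') c;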
diff --git a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec
index 15bb6dea935..c0b0430b278 100644
--- a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec
+++ b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec
@@ -1,5 +1,6 @@
 stringAscii
 SELECT ASCII(first_name) s FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
+
 stringChar
 SELECT CHAR(emp_no % 10000) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no;
 
@@ -9,6 +10,9 @@ SELECT emp_no, ASCII(first_name) a FROM "test_emp" WHERE ASCII(first_name) < 100
 stringAsciiEqualsConstant
 SELECT emp_no, ASCII(first_name) a, first_name name FROM "test_emp" WHERE ASCII(first_name) = 65 ORDER BY emp_no;
 
+stringAsciiInline
+SELECT ASCII('E') e;
+
 //https://github.com/elastic/elasticsearch/issues/31863
 //stringSelectConstantAsciiEqualsConstant
 //SELECT ASCII('A') = 65 a FROM "test_emp" WHERE ASCII('A') = 65 ORDER BY emp_no;
@@ -16,12 +20,105 @@ SELECT emp_no, ASCII(first_name) a, first_name name FROM "test_emp" WHERE ASCII(
 stringCharFilter
 SELECT emp_no, CHAR(emp_no % 10000) m FROM "test_emp" WHERE CHAR(emp_no % 10000) = 'A';
 
+stringSelectCharInline1
+SELECT CHAR(250) c;
+
+stringSelectCharInline2
+SELECT CHAR(2) c;
+
+charLengthInline1
+SELECT CAST(CHAR_LENGTH('Elasticsearch') AS INT) charlength;
+
+charLengthInline2
+SELECT CAST(CHAR_LENGTH(' Elasticsearch ') AS INT) charlength;
+
+charLengthInline3
+SELECT CAST(CHAR_LENGTH('') AS INT) charlength;
+
+concatInline1
+SELECT CONCAT('Elastic','search') concat;
+
+concatInline2
+SELECT CONCAT(CONCAT('Lucene And ', 'Elastic'),'search') concat;
+
+concatInline3
+SELECT CONCAT(CONCAT('Lucene And ', 'Elastic'),CONCAT('search','')) concat;
+
 lcaseFilter
 SELECT LCASE(first_name) lc, CHAR(ASCII(LCASE(first_name))) chr FROM "test_emp" WHERE CHAR(ASCII(LCASE(first_name))) = 'a';
 
+lcaseInline1
+SELECT LCASE('') L;
+
+lcaseInline2
+SELECT LCASE('ElAsTiC fantastic') lower;
+
+leftInline1
+SELECT LEFT('Elasticsearch', 7) leftchars;
+
+leftInline2
+SELECT LEFT('Elasticsearch', 1) leftchars;
+
+leftInline3
+SELECT LEFT('Elasticsearch', 25) leftchars;
+
+leftInline4
+SELECT LEFT('Elasticsearch', LENGTH('abcdefghijklmnop')) leftchars;
+
 ltrimFilter
 SELECT LTRIM(first_name) lt FROM "test_emp" WHERE LTRIM(first_name) = 'Bob';
 
+ltrimInline1
+SELECT LTRIM(' Elastic ') trimmed;
+
+ltrimInline2
+SELECT LTRIM(' ') trimmed;
+
+locateInline1
+SELECT LOCATE('a', 'Elasticsearch', 8) location;
+
+locateInline2
+SELECT LOCATE('a', 'Elasticsearch') location;
+
+locateInline3
+SELECT LOCATE('x', 'Elasticsearch') location;
+
+insertInline1
+SELECT INSERT('Insert [here] your comment!', 8, 6, '(random thoughts about Elasticsearch)') ins;
+
+insertInline2
+SELECT INSERT('Insert [here] your comment!', 8, 20, '(random thoughts about Elasticsearch)') ins;
+
+insertInline3
+SELECT INSERT('Insert [here] your comment!', 8, 19, '(random thoughts about Elasticsearch)') ins;
+
+positionInline1
+SELECT POSITION('a','Elasticsearch') pos;
+
+positionInline2
+SELECT POSITION('x','Elasticsearch') pos;
+
+repeatInline1
+SELECT REPEAT('Elastic',2) rep;
+
+repeatInline2
+SELECT REPEAT('Elastic',1) rep;
+
+replaceInline1
+SELECT REPLACE('Elasticsearch','sea','A') repl;
+
+replaceInline2
+SELECT REPLACE('Elasticsearch','x','A') repl;
+
+rightInline1
+SELECT RIGHT('Elasticsearch', LENGTH('Search')) rightchars;
+
+rightInline2
+SELECT RIGHT(CONCAT('Elastic','search'), LENGTH('Search')) rightchars;
+
+rightInline3
+SELECT RIGHT('Elasticsearch', 0) rightchars;
+
 // Unsupported yet
 // Functions combined with 'LIKE' should perform the match inside a Painless script, whereas at the moment it's handled as a regular `match` query in ES.
 //ltrimFilterWithLike
@@ -30,15 +127,45 @@ SELECT LTRIM(first_name) lt FROM "test_emp" WHERE LTRIM(first_name) = 'Bob';
 rtrimFilter
 SELECT RTRIM(first_name) rt FROM "test_emp" WHERE RTRIM(first_name) = 'Johnny';
 
+rtrimInline1
+SELECT RTRIM(' Elastic ') trimmed;
+
+rtrimInline2
+SELECT RTRIM(' ') trimmed;
+
 spaceFilter
 SELECT SPACE(languages) spaces, languages FROM "test_emp" WHERE SPACE(languages) = ' ';
 
 spaceFilterWithLengthFunctions
 SELECT SPACE(languages) spaces, languages, first_name FROM "test_emp" WHERE CHAR_LENGTH(SPACE(languages)) = 3 ORDER BY first_name;
 
+spaceInline1
+SELECT SPACE(5) space;
+
+spaceInline2
+SELECT SPACE(0) space;
+
+substringInline1
+SELECT SUBSTRING('Elasticsearch', 1, 7) sub;
+
+substringInline2
+SELECT SUBSTRING('Elasticsearch', 1, 15) sub;
+
+substringInline3
+SELECT SUBSTRING('Elasticsearch', 10, 10) sub;
+
 ucaseFilter
 SELECT UCASE(gender) uppercased, COUNT(*) count FROM "test_emp" WHERE UCASE(gender) = 'F' GROUP BY UCASE(gender);
 
+ucaseInline1
+SELECT UCASE('ElAsTiC') upper;
+
+ucaseInline2
+SELECT UCASE('') upper;
+
+ucaseInline3
+SELECT UCASE(' elastic ') upper;
+
 //
 // Group and order by
 //